From 998705c38f09c7075b17eff1549e5c92c381e27a Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Sun, 4 Feb 2018 14:05:43 -0500 Subject: [PATCH 01/51] Added fitting procedure for sfnr and snr parameters --- brainiak/utils/fmrisim.py | 107 +++++++++++++++++++++++++----------- tests/utils/test_fmrisim.py | 55 ++++++++---------- 2 files changed, 99 insertions(+), 63 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 69cd91de8..3a7dfea56 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1108,15 +1108,16 @@ def _calc_sfnr(volume, def _calc_snr(volume, mask, tr=None, + remove_baseline=False, ): """ Calculate the the SNR of a volume Calculates the Signal to Noise Ratio, the mean of brain voxels divided by the standard deviation across non-brain voxels. Specify a TR value to calculate the mean and standard deviation for that TR. To - calculate the standard deviation this subtracts any baseline structure - in the non-brain voxels, hence getting at deviations due to the system - noise and not something like high baseline values in non-brain parts of - the body. + calculate the standard deviation of non-brain voxels we can subtract + any baseline structure away first, hence getting at deviations due to the + system noise and not something like high baseline values in non-brain + parts of the body. Parameters ---------- @@ -1130,6 +1131,9 @@ def _calc_snr(volume, tr : int Integer specifying TR to calculate the SNR for + remove_baseline : bool + Is the baseline (a.k.a. temporal mean activation) removed. 
+ Returns ------- @@ -1147,12 +1151,17 @@ def _calc_snr(volume, nonbrain_voxels = volume[:, :, :, tr][mask == 0] # Find the mean of the non_brain voxels (deals with structure that may - # exist outside of the mask) - nonbrain_voxels_mean = np.mean(volume[mask == 0], 1) # Take the means of each voxel over time mean_voxels = np.nanmean(brain_voxels) - std_voxels = np.nanstd(nonbrain_voxels - nonbrain_voxels_mean) + + # Find the standard deviation of the voxels + if remove_baseline == True: + # exist outside of the mask) + nonbrain_voxels_mean = np.mean(volume[mask == 0], 1) + std_voxels = np.nanstd(nonbrain_voxels - nonbrain_voxels_mean) + else: + std_voxels = np.nanstd(nonbrain_voxels) # Return the snr return mean_voxels / std_voxels @@ -1895,10 +1904,10 @@ def _generate_noise_temporal(stimfunction_tr, def mask_brain(volume, template_name=None, mask_threshold=None, - mask_self=0, + mask_self=True, ): """ Mask the simulated volume - This creates a mask specifying the likelihood (kind of) a voxel is + This creates a mask specifying the approximate likelihood that a voxel is part of the brain. All values are bounded to the range of 0 to 1. An appropriate threshold to isolate brain voxels is >0.2 @@ -1911,8 +1920,8 @@ def mask_brain(volume, template_name : str What is the path to the template to be loaded? If empty then it - defaults to an MNI152 grey matter mask. This is ignored if mask_self is - True. + defaults to an MNI152 grey matter mask. This is ignored if mask_self + is True. mask_threshold : float What is the threshold (0 -> 1) for including a voxel in the mask? If @@ -1921,9 +1930,10 @@ def mask_brain(volume, the minima before that peak as the threshold. Won't work when the data is not bimodal. - mask_self : bool + mask_self : bool or None If set to true then it makes a mask from the volume supplied (by - averaging across time points and changing the range). + averaging across time points and changing the range). 
If it is set + to false then it will use the template_name as an input. Returns ---------- @@ -1950,7 +1960,7 @@ def mask_brain(volume, else: mask_raw = np.load(template_name) - # Make the masks 3d + # Make the masks 3dremove_baseline if len(mask_raw.shape) == 3: mask_raw = np.array(mask_raw) elif len(mask_raw.shape) == 4 and mask_raw.shape[3] == 1: @@ -2079,6 +2089,8 @@ def generate_noise(dimensions, template, mask=None, noise_dict=None, + temporal_proportion=0.5, + iterations=10, ): """ Generate the noise to be added to the signal. Default noise parameters will create a noise volume with a standard @@ -2107,8 +2119,19 @@ def generate_noise(dimensions, noise_dict : dictionary, float This is a dictionary which describes the noise parameters of the - data. If there are no other variables provided then it will use default - values + data. If there are no other variables provided then it will use + default values + + temporal_proportion, float + What is the proportion of the temporal variance (as specified by the + SFNR noise parameter) that is accounted for by the system noise. If + this number is high then all of the temporal variability is due to + system noise, if it is low then all of the temporal variability is + due to brain variability. + + iterations : int + How many steps of fitting the SFNR and SNR values will be performed. + Usually converges after < 10. Returns ---------- @@ -2156,6 +2179,10 @@ def generate_noise(dimensions, # Create the base (this inverts the process to make the template) base = template * noise_dict['max_activity'] + # Reshape the base (to be the same size as the volume to be created) + base = base.reshape(dimensions[0], dimensions[1], dimensions[2], 1) + base = np.ones(dimensions_tr) * base + # What is the mean signal of the non masked voxels in this template? 
mean_signal = (base[mask > 0]).mean() @@ -2163,28 +2190,46 @@ def generate_noise(dimensions, # variability temporal_sd = (mean_signal / noise_dict['sfnr']) - # Calculate the sd that is necessary to be combined with itself in order - # to generate the temporal_sd - temporal_sd_element = np.sqrt(temporal_sd ** 2 / 2) + # Calculate the temporal sd of the system noise (as opposed to the noise + # attributed to the functional variability). + temporal_sd_system = np.sqrt((temporal_sd ** 2) * temporal_proportion) # What is the standard deviation of the background activity spatial_sd = mean_signal / noise_dict['snr'] - # Set up the machine noise - noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, - spatial_sd=spatial_sd, - temporal_sd=temporal_sd_element, - ) + # Cycle through the iterations + spat_sd_orig = np.copy(spatial_sd) + temp_sd_orig = np.copy(temporal_sd_system) + for iteration in list(range(iterations + 1)): + # Set up the machine noise + noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, + spatial_sd=spatial_sd, + temporal_sd=temporal_sd_system, + ) - # Reshape the base (to be the same size as the volume to be created) - base = base.reshape(dimensions[0], dimensions[1], dimensions[2], 1) - base = np.ones(dimensions_tr) * base + # Sum up the noise of the brain + noise = base + (noise_temporal * (1 - temporal_sd_system)) + \ + noise_system + + # Reject negative values (only happens outside of the brain) + noise[noise < 0] = 0 + + # If there are iterations left to perform then recalculate the + # metrics and try again + alpha = 0.5 + if iteration < iterations: + + # Calculate the new metrics + new_sfnr = _calc_sfnr(noise, mask) + new_snr = _calc_snr(noise, mask) - # Sum up the noise of the brain - noise = base + (noise_temporal * temporal_sd_element) + noise_system + temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * + temporal_proportion) + spat_sd_new = mean_signal / new_snr - # Reject negative values (only happens outside 
of the brain) - noise[noise < 0] = 0 + # Update the variables + temporal_sd_system -= ((temp_sd_new - temp_sd_orig) * alpha) + spatial_sd -= ((spat_sd_new - spat_sd_orig) * alpha) return noise diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index a1b0f8278..5fafd174f 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -212,17 +212,26 @@ def test_generate_noise(): assert np.std(signal) < np.std(noise), "Noise was not created" - noise = sim.generate_noise(dimensions=dimensions, - stimfunction_tr=stimfunction_tr, - tr_duration=tr_duration, - template=template, - mask=mask, - noise_dict={'sfnr': 10000, 'snr': 10000}, - ) + noise_high = sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict={'sfnr': 1000, 'snr': 1000}, + ) - system_noise = np.std(noise[mask > 0], 1).mean() + noise_low = sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict={'sfnr': 100, 'snr': 100}, + ) - assert system_noise <= 0.1, "Noise strength could not be manipulated" + system_high = np.std(noise_high[mask > 0], 0).mean() + system_low = np.std(noise_low[mask > 0], 0).mean() + + assert system_low > system_high, "Noise strength could not be manipulated" def test_mask_brain(): @@ -294,7 +303,7 @@ def test_calc_noise(): ) # Mask the volume to be the same shape as a brain - mask, template = sim.mask_brain(dimensions_tr, mask_threshold=0.2) + mask, template = sim.mask_brain(dimensions_tr) stimfunction_tr = stimfunction[::int(tr_duration * 100)] noise = sim.generate_noise(dimensions=dimensions_tr[0:3], stimfunction_tr=stimfunction_tr, @@ -304,26 +313,8 @@ def test_calc_noise(): noise_dict=nd_orig, ) - # Check that noise_system is being calculated correctly - spatial_sd = 5 - temporal_sd = 5 - noise_system = sim._generate_noise_system(dimensions_tr, - spatial_sd, - 
temporal_sd) - - precision = abs(noise_system[0, 0, 0, :].std() - spatial_sd) - assert precision < spatial_sd, 'noise_system calculated incorrectly' - - precision = abs(noise_system[:, :, :, 0].std() - temporal_sd) - assert precision < spatial_sd, 'noise_system calculated incorrectly' - - # Calculate the noise - nd_calc = sim.calc_noise(volume=noise, - mask=mask) - - # How precise are these estimates - precision = abs(nd_calc['snr'] - nd_orig['snr']) - assert precision < nd_orig['snr'], 'snr calculated incorrectly' + new_sfnr = sim._calc_sfnr(noise, mask) + new_snr = sim._calc_snr(noise, mask) - precision = abs(nd_calc['sfnr'] - nd_orig['sfnr']) - assert precision < nd_orig['sfnr'], 'sfnr calculated incorrectly' + assert abs(nd_orig['snr'] - new_snr) < 5, 'snr calculated incorrectly' + assert abs(nd_orig['sfnr'] - new_sfnr) < 5, 'sfnr calculated incorrectly' From 2040c90a1b704223bf92d6e6b0ee713bc4923ae1 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 6 Feb 2018 20:03:43 -0500 Subject: [PATCH 02/51] Substantially reworked the noise estimation procedure in order to make it simpler and more tractable --- brainiak/utils/fmrisim.py | 265 ++++++++++++++++-------------------- tests/utils/test_fmrisim.py | 33 +++-- 2 files changed, 140 insertions(+), 158 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 3a7dfea56..dc029dc90 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1167,14 +1167,13 @@ def _calc_snr(volume, return mean_voxels / std_voxels -def _calc_temporal_noise(volume, - mask, - auto_reg_order=1, - ): - """ Calculate the the temporal noise of a volume - This calculates the variability of the volume over time and the - proportion of variance over time that is due to autoregression and how - much is due to scanner drift. 
+def _calc_AR_noise(volume, + mask, + auto_reg_order=2, + ): + """ Calculate the the AR noise of a volume + This calculates the AR of the volume over time by averaging all brain + voxels. Parameters ---------- @@ -1187,42 +1186,25 @@ def _calc_temporal_noise(volume, A binary mask the same size as the volume auto_reg_order : int - What order of the autoregression do you want to pull out + What order of the autoregression do you want to estimate Returns ------- - - sfnr : float
 - The SFNR of the volume (mean brain activity divided by
 temporal - variability in the averaged non brain voxels)

 - - auto_reg_sigma : float - A sigma of the autoregression in the data - - drift_sigma : float - Sigma of the drift in the data + auto_reg_rho : list of floats + A rho of the autoregression in the data """ - # Calculate sfnr and convert from memmap - sfnr = _calc_sfnr(volume, - mask, - ) - # Calculate the time course of voxels within the brain timecourse = np.mean(volume[mask > 0], 0) demeaned_timecourse = timecourse-timecourse.mean() # Pull out the AR values (depends on order) - auto_reg_sigma = ar.AR_est_YW(demeaned_timecourse, auto_reg_order) - auto_reg_sigma = np.sqrt(auto_reg_sigma[1]) + auto_reg_rho,_ = ar.AR_est_YW(demeaned_timecourse, auto_reg_order) - # What is the size of the change in the time course - drift_sigma = timecourse.std().tolist() - - return sfnr, auto_reg_sigma, drift_sigma + return auto_reg_rho.tolist() def calc_noise(volume, @@ -1268,12 +1250,23 @@ def calc_noise(volume, # convert between the mask and the mean of the brain volume) noise_dict['max_activity'] = np.nanmax(np.mean(volume, 3)) - # Since you are deriving the 'true' values then you want your noise to - # be set to that level - # Calculate the temporal variability of the volume - sfnr, auto_reg, drift = _calc_temporal_noise(volume, mask) - noise_dict['sfnr'] = sfnr + noise_dict['auto_reg_rho'] = _calc_AR_noise(volume, mask) + + # Set it such that all of the temporal variability will be accounted for + # by the AR component + noise_dict['auto_reg_sigma'] = 1 + + # Preset these values to be zero, as in you are not attempting to + # simulate them + noise_dict['physiological_sigma'] = 0 + noise_dict['task_sigma'] = 0 + noise_dict['drift_sigma'] = 0 + + # Calculate the sfnr + noise_dict['sfnr'] = _calc_sfnr(volume, + mask, + ) # Calculate the fwhm on a subset of volumes if volume.shape[3] > 100: @@ -1296,13 +1289,6 @@ def calc_noise(volume, mask, ) - # Total temporal noise, since these values only make sense relatively - total_temporal_noise = auto_reg + drift - - # What proportion of 
noise is accounted for by these variables? - noise_dict['auto_reg_sigma'] = auto_reg / total_temporal_noise - noise_dict['drift_sigma'] = drift / total_temporal_noise - # Return the noise dictionary return noise_dict @@ -1525,7 +1511,6 @@ def _generate_noise_temporal_drift(trs, def _generate_noise_temporal_autoregression(timepoints, - auto_reg_order=1, auto_reg_rho=[0.5], ): @@ -1539,15 +1524,11 @@ def _generate_noise_temporal_autoregression(timepoints, timepoints : 1 Dimensional array What time points are sampled by a TR - auto_reg_order : float - How many timepoints ought to be taken into consideration for the - autoregression function - - auto_reg_rho : float + auto_reg_rho : list of floats What is the scaling factor on the predictiveness of the previous - time point. This value is below 1 to avoid brownian motion (and - growing variance). Values near or greater than one may produce drift or - other unwanted trends. + time point. Values near or greater than one may produce drift or + other unwanted trends. The length of this list determines the order + of the AR function Returns ---------- @@ -1556,10 +1537,8 @@ def _generate_noise_temporal_autoregression(timepoints, """ - if len(auto_reg_rho) == 1: - auto_reg_rho = auto_reg_rho * auto_reg_order # Duplicate this so that - # there is one - # for each value + # Specify the order based on the number of rho supplied + auto_reg_order = len(auto_reg_rho) # Generate a random variable at each time point that is a decayed value # of the previous time points @@ -1769,11 +1748,7 @@ def _generate_noise_temporal(stimfunction_tr, dimensions, template, mask, - fwhm, - motion_sigma, - drift_sigma, - auto_reg_sigma, - physiological_sigma, + noise_dict ): """Generate the temporal noise Generate the time course of the average brain voxel. 
To change the @@ -1800,27 +1775,14 @@ def _generate_noise_temporal(stimfunction_tr, mask : 3 dimensional array, binary The masked brain, thresholded to distinguish brain and non-brain - fwhm : float - What is the full width half max of the gaussian fields being created - to model spatial noise. - - motion_sigma : float - This is noise that only occurs for the task events, potentially - representing something like noise due to motion - - drift_sigma : float - - What is the sigma on the size of the sine wave - - auto_reg_sigma : float, list - How large is the sigma on the autocorrelation. Higher means more - variable over time. If there are multiple entries then this is - inferred as higher orders of the autoregression - - physiological_sigma : float - - How variable is the signal as a result of physiology, - like heart beat and breathing + noise_dict : dict + A dictionary specifying the types of noise in this experiment. The + noise types interact in important ways. First, all noise types + ending with sigma (e.g. motion sigma) are mixed together in + _generate_temporal_noise. The sigma values describe the proportion of + mixing of these elements. However critically, SFNR is the + parameter that describes how much noise these components contribute + to the brain. Returns ---------- @@ -1837,68 +1799,84 @@ def _generate_noise_temporal(stimfunction_tr, # What time points are sampled by a TR? 
timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs)) - noise_drift = _generate_noise_temporal_drift(trs, - tr_duration, - ) + # Preset the volume + noise_volume = np.zeros((dimensions[0], dimensions[1], dimensions[2], trs)) - noise_phys = _generate_noise_temporal_phys(timepoints, + # Generate the drift noise + if noise_dict['drift_sigma'] != 0: + # Calculate the drift time course + noise = _generate_noise_temporal_drift(trs, + tr_duration, ) + # Create a volume with the drift properties + volume = np.ones(dimensions) - noise_autoregression = _generate_noise_temporal_autoregression(timepoints, - ) - - # Generate the volumes that will differ depending on the type of noise - # that it will be used for. For drift you want the volume to not have - # the shape of the brain, for the other types of noise you want them to - # have brain shapes - volume_drift = np.ones(dimensions) - - volume_phys = _generate_noise_spatial(dimensions=dimensions, - template=template, - mask=mask, - fwhm=fwhm, - ) - - volume_autoreg = _generate_noise_spatial(dimensions=dimensions, - template=template, - mask=mask, - fwhm=fwhm, - ) - - # Multiply the noise by the spatial volume - noise_drift_volume = np.multiply.outer(volume_drift, noise_drift) - noise_phys_volume = np.multiply.outer(volume_phys, noise_phys) - noise_autoregression_volume = np.multiply.outer(volume_autoreg, - noise_autoregression) - - # Sum the noise (it would have been nice to just add all of them in a - # single line but this was causing formatting problems) - noise_temporal = noise_drift_volume * drift_sigma - noise_temporal = noise_temporal + (noise_phys_volume * physiological_sigma) - noise_temporal = noise_temporal + (noise_autoregression_volume * - auto_reg_sigma) - - # Only do this if you are making motion variance - if motion_sigma != 0 and np.sum(stimfunction_tr) > 0: - # Make each noise type - noise_task = _generate_noise_temporal_task(stimfunction_tr, - ) - volume_task = 
_generate_noise_spatial(dimensions=dimensions, - template=template, - mask=mask, - fwhm=fwhm, + # Combine the volume and noise + noise_volume += np.multiply.outer(volume, noise) * noise_dict[ + 'drift_sigma'] + + # Generate the physiological noise + if noise_dict['physiological_sigma'] != 0: + + # Calculate the physiological time course + noise = _generate_noise_temporal_phys(timepoints, + ) + + # Create a brain shaped volume with similar smoothing properties + volume = _generate_noise_spatial(dimensions=dimensions, + template=template, + mask=mask, + fwhm=noise_dict['fwhm'], + ) + + # Combine the volume and noise + noise_volume += np.multiply.outer(volume, noise) * noise_dict[ + 'physiological_sigma'] + + # Generate the AR noise + if noise_dict['auto_reg_sigma'] != 0: + # Calculate the AR time course + noise = _generate_noise_temporal_autoregression(timepoints, + noise_dict[ + 'auto_reg_rho'], + ) + + # Create a brain shaped volume with similar smoothing properties + volume = _generate_noise_spatial(dimensions=dimensions, + template=template, + mask=mask, + fwhm=noise_dict['fwhm'], + ) + + # Combine the volume and noise + noise_volume += np.multiply.outer(volume, noise) * noise_dict[ + 'auto_reg_sigma'] + + # Generate the task related noise + if noise_dict['task_sigma'] != 0 and np.sum(stimfunction_tr) > 0: + + # Calculate the task based noise time course + noise = _generate_noise_temporal_task(stimfunction_tr, ) - noise_task_volume = np.multiply.outer(volume_task, noise_task) - noise_temporal = noise_temporal + (noise_task_volume * motion_sigma) + + # Create a brain shaped volume with similar smoothing properties + volume = _generate_noise_spatial(dimensions=dimensions, + template=template, + mask=mask, + fwhm=noise_dict['fwhm'], + ) + # Combine the volume and noise + noise_volume += np.multiply.outer(volume, noise) * noise_dict[ + 'task_sigma'] # Finally, z score each voxel so things mix nicely - noise_temporal = stats.zscore(noise_temporal, 3) + noise_volume = 
stats.zscore(noise_volume, 3) # If it is a nan it is because you just divided by zero (since some # voxels are zeros in the template) - noise_temporal[np.isnan(noise_temporal)] = 0 + noise_volume[np.isnan(noise_volume)] = 0 - return noise_temporal + return noise_volume def mask_brain(volume, @@ -2061,12 +2039,14 @@ def _noise_dict_update(noise_dict): # Check what noise is in the dictionary and add if necessary. Numbers # determine relative proportion of noise - if 'motion_sigma' not in noise_dict: - noise_dict['motion_sigma'] = 0 + if 'task_sigma' not in noise_dict: + noise_dict['task_sigma'] = 0 if 'drift_sigma' not in noise_dict: noise_dict['drift_sigma'] = 0.45 if 'auto_reg_sigma' not in noise_dict: - noise_dict['auto_reg_sigma'] = 0.45 + noise_dict['auto_reg_sigma'] = 0.5 + if 'auto_reg_rho' not in noise_dict: + noise_dict['auto_reg_rho'] = [0.5] if 'physiological_sigma' not in noise_dict: noise_dict['physiological_sigma'] = 0.1 if 'sfnr' not in noise_dict: @@ -2145,7 +2125,7 @@ def generate_noise(dimensions, if noise_dict is None: noise_dict = {} - # Take in the noise dictionary and determine whether + # Take in the noise dictionary and add any missing information noise_dict = _noise_dict_update(noise_dict) # What are the dimensions of the volume, including time @@ -2164,16 +2144,7 @@ def generate_noise(dimensions, dimensions=dimensions, template=template, mask=mask, - fwhm=noise_dict[ - 'fwhm'], - motion_sigma=noise_dict[ - 'motion_sigma'], - drift_sigma=noise_dict[ - 'drift_sigma'], - auto_reg_sigma=noise_dict[ - 'auto_reg_sigma'], - physiological_sigma=noise_dict[ - 'physiological_sigma'], + noise_dict=noise_dict, ) # Create the base (this inverts the process to make the template) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 5fafd174f..e368f140d 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -193,7 +193,9 @@ def test_generate_noise(): ) # Generate the mask of the signal - mask, template = 
sim.mask_brain(signal, mask_threshold=0.1) + mask, template = sim.mask_brain(signal, + mask_threshold=0.1, + mask_self=False) assert min(mask[mask > 0]) > 0.1, "Mask thresholding did not work" assert len(np.unique(template) > 2), "Template creation did not work" @@ -253,7 +255,7 @@ def test_mask_brain(): ) # Mask the volume to be the same shape as a brain - mask, _ = sim.mask_brain(volume) + mask, _ = sim.mask_brain(dimensions, mask_self=None,) brain = volume * mask assert np.sum(brain != 0) == np.sum(volume != 0), "Masking did not work" @@ -271,7 +273,7 @@ def test_mask_brain(): ) # Mask the volume to be the same shape as a brain - mask, _ = sim.mask_brain(volume) + mask, _ = sim.mask_brain(dimensions, mask_self=None, ) brain = volume * mask assert np.sum(brain != 0) < np.sum(volume != 0), "Masking did not work" @@ -288,8 +290,11 @@ def test_calc_noise(): dimensions_tr = np.array([10, 10, 10, tr_number]) # Preset the noise dict - nd_orig = {'auto_reg_sigma': 0.6, - 'drift_sigma': 0.4, + nd_orig = {'auto_reg_sigma': 1, + 'drift_sigma': 0, + 'auto_reg_rho': [1.0, -0.5], + 'physiological_sigma': 0, + 'task_sigma': 0, 'snr': 30, 'sfnr': 30, 'max_activity': 1000, @@ -303,7 +308,7 @@ def test_calc_noise(): ) # Mask the volume to be the same shape as a brain - mask, template = sim.mask_brain(dimensions_tr) + mask, template = sim.mask_brain(dimensions_tr, mask_self=None) stimfunction_tr = stimfunction[::int(tr_duration * 100)] noise = sim.generate_noise(dimensions=dimensions_tr[0:3], stimfunction_tr=stimfunction_tr, @@ -313,8 +318,14 @@ def test_calc_noise(): noise_dict=nd_orig, ) - new_sfnr = sim._calc_sfnr(noise, mask) - new_snr = sim._calc_snr(noise, mask) - - assert abs(nd_orig['snr'] - new_snr) < 5, 'snr calculated incorrectly' - assert abs(nd_orig['sfnr'] - new_sfnr) < 5, 'sfnr calculated incorrectly' + # Calculate the noise parameters from this newly generated volume + nd_new = sim.calc_noise(noise, mask) + + snr_diff = abs(nd_orig['snr'] - nd_new['snr']) + assert 
snr_diff < 5, 'snr calculated incorrectly' + sfnr_diff = abs(nd_orig['sfnr'] - nd_new['sfnr']) + assert sfnr_diff < 5, 'sfnr calculated incorrectly' + ar1_diff = abs(nd_orig['auto_reg_rho'][0] - nd_new['auto_reg_rho'][0]) + assert ar1_diff < 1, 'AR1 calculated incorrectly' + ar2_diff = abs(nd_orig['auto_reg_rho'][1] - nd_new['auto_reg_rho'][1]) + assert ar2_diff < 1, 'AR2 calculated incorrectly' \ No newline at end of file From 636137693cc4a8720f8610d941fb78f6eed92908 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Wed, 14 Feb 2018 16:30:57 -0500 Subject: [PATCH 03/51] Improved AR estimation and calculation --- brainiak/utils/fmrisim.py | 107 ++++++++++++++++++++++++++++-------- tests/utils/test_fmrisim.py | 6 +- 2 files changed, 87 insertions(+), 26 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index dc029dc90..dd97a38d7 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1170,6 +1170,7 @@ def _calc_snr(volume, def _calc_AR_noise(volume, mask, auto_reg_order=2, + sample_num=100, ): """ Calculate the the AR noise of a volume This calculates the AR of the volume over time by averaging all brain @@ -1188,6 +1189,8 @@ def _calc_AR_noise(volume, auto_reg_order : int What order of the autoregression do you want to estimate + sample_num : int + How many voxels would you like to sample to calculate the AR values Returns ------- @@ -1198,13 +1201,27 @@ def _calc_AR_noise(volume, """ # Calculate the time course of voxels within the brain - timecourse = np.mean(volume[mask > 0], 0) - demeaned_timecourse = timecourse-timecourse.mean() + brain_timecourse = volume[mask > 0] - # Pull out the AR values (depends on order) - auto_reg_rho,_ = ar.AR_est_YW(demeaned_timecourse, auto_reg_order) + # Identify some brain voxels to assess + voxel_idxs = list(range(brain_timecourse.shape[0])) + np.random.shuffle(voxel_idxs) - return auto_reg_rho.tolist() + auto_reg_rho_all = np.zeros((sample_num, auto_reg_order)) + for 
voxel_counter in range(sample_num): + + # Get the timecourse and demean it + timecourse = brain_timecourse[voxel_idxs[voxel_counter], :] + demeaned_timecourse = timecourse - timecourse.mean() + + # Pull out the AR values (depends on order) + auto_reg_rho,_ = ar.AR_est_YW(demeaned_timecourse, auto_reg_order) + + # Add to the list + auto_reg_rho_all[voxel_counter, :] = auto_reg_rho + + # Average all of the values and then convert them to a list + return np.mean(auto_reg_rho_all, 0).tolist() def calc_noise(volume, @@ -1511,7 +1528,11 @@ def _generate_noise_temporal_drift(trs, def _generate_noise_temporal_autoregression(timepoints, - auto_reg_rho=[0.5], + auto_reg_rho, + dimensions, + template, + mask, + fwhm, ): """Generate the autoregression noise @@ -1530,6 +1551,33 @@ def _generate_noise_temporal_autoregression(timepoints, other unwanted trends. The length of this list determines the order of the AR function + dimensions : 3 length array, int + What is the shape of the volume to be generated + + template : 3d array, float + A continuous (0 -> 1) volume describing the likelihood a voxel is in + the brain. This can be used to contrast the brain and non brain. + + mask : 3 dimensional array, binary + The masked brain, thresholded to distinguish brain and non-brain + + fwhm : float + What is the full width half max of the gaussian fields being created. + This is converted into a sigma which is used in this function. + However, this conversion was found empirically by testing values of + sigma and how it relates to fwhm values. The relationship that would be + found in such a test depends on the size of the brain (bigger brains + can have bigger fwhm). + However, small errors shouldn't matter too much since the fwhm + generated here can only be approximate anyway: firstly, although the + distribution that is being drawn from is set to this value, + this will manifest differently on every draw. 
Secondly, because of + the masking and dimensions of the generated volume, this does not + behave simply- wrapping effects matter (the outputs are + closer to the input value if you have no mask). + Use _calc_fwhm on this volume alone if you have concerns about the + accuracy of the fwhm. + Returns ---------- noise_autoregression : one dimensional array, float @@ -1542,23 +1590,39 @@ def _generate_noise_temporal_autoregression(timepoints, # Generate a random variable at each time point that is a decayed value # of the previous time points - noise_autoregression = [] + noise_autoregression = np.zeros((dimensions[0], dimensions[1], + dimensions[2], len(timepoints))) for tr_counter in range(len(timepoints)): + # Create a brain shaped volume with similar smoothing properties + volume = _generate_noise_spatial(dimensions=dimensions, + template=template, + mask=mask, + fwhm=fwhm, + ) + if tr_counter == 0: - noise_autoregression.append(np.random.normal(0, 1)) + noise_autoregression[:, :, :, tr_counter] = volume else: - temp = [] + temp_vol = np.zeros((dimensions[0], dimensions[1], dimensions[ + 2], auto_reg_order + 1)) for pCounter in list(range(1, auto_reg_order + 1)): + past_TR = int(tr_counter - pCounter) if tr_counter - pCounter >= 0: - past_trs = noise_autoregression[int(tr_counter - pCounter)] + + # Pull out a previous TR + past_vols = noise_autoregression[:, :, :, past_TR] + + # How much to 'discount' this TR past_reg = auto_reg_rho[pCounter - 1] - temp.append(past_trs * past_reg) - random = np.random.normal(0, 1) - noise_autoregression.append(np.sum(temp) + random) + # Add it to the list of TRs to consider + temp_vol[:, :, :, pCounter] = past_vols * past_reg + + noise_autoregression[:, :, :, tr_counter] = np.sum( + temp_vol, 3) + volume # N.B. You don't want to normalize. 
Although that may make the sigma of # this timecourse 1, it will change the autoregression coefficient to be @@ -1835,22 +1899,19 @@ def _generate_noise_temporal(stimfunction_tr, # Generate the AR noise if noise_dict['auto_reg_sigma'] != 0: - # Calculate the AR time course + + # Calculate the AR time course volume noise = _generate_noise_temporal_autoregression(timepoints, noise_dict[ 'auto_reg_rho'], + dimensions, + template, + mask, + noise_dict['fwhm'], ) - # Create a brain shaped volume with similar smoothing properties - volume = _generate_noise_spatial(dimensions=dimensions, - template=template, - mask=mask, - fwhm=noise_dict['fwhm'], - ) - # Combine the volume and noise - noise_volume += np.multiply.outer(volume, noise) * noise_dict[ - 'auto_reg_sigma'] + noise_volume += noise * noise_dict['auto_reg_sigma'] # Generate the task related noise if noise_dict['task_sigma'] != 0 and np.sum(stimfunction_tr) > 0: diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index e368f140d..75c3e8877 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -219,7 +219,7 @@ def test_generate_noise(): tr_duration=tr_duration, template=template, mask=mask, - noise_dict={'sfnr': 1000, 'snr': 1000}, + noise_dict={'sfnr': 100, 'snr': 100}, ) noise_low = sim.generate_noise(dimensions=dimensions, @@ -227,13 +227,13 @@ def test_generate_noise(): tr_duration=tr_duration, template=template, mask=mask, - noise_dict={'sfnr': 100, 'snr': 100}, + noise_dict={'sfnr': 1000, 'snr': 1000}, ) system_high = np.std(noise_high[mask > 0], 0).mean() system_low = np.std(noise_low[mask > 0], 0).mean() - assert system_low > system_high, "Noise strength could not be manipulated" + assert system_low < system_high, "Noise strength could not be manipulated" def test_mask_brain(): From 229f7bb6e94bbd7a0b6a076e9ce5764460327ca4 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 20 Feb 2018 09:35:23 -0500 Subject: [PATCH 04/51] Updated AR to be ARMA, involving both 
the generation and estimation --- brainiak/utils/fmrisim.py | 215 +++++++++++++++++++++++++++--------- tests/utils/test_fmrisim.py | 8 +- 2 files changed, 166 insertions(+), 57 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index dd97a38d7..8f2632dce 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -78,7 +78,7 @@ import logging from itertools import product -import nitime.algorithms.autoregressive as ar +from statsmodels.tsa.arima_model import ARMA import math import numpy as np from pkg_resources import resource_stream @@ -1167,21 +1167,23 @@ def _calc_snr(volume, return mean_voxels / std_voxels -def _calc_AR_noise(volume, - mask, - auto_reg_order=2, - sample_num=100, - ): - """ Calculate the the AR noise of a volume - This calculates the AR of the volume over time by averaging all brain - voxels. +def _calc_ARMA_noise(volume, + mask, + auto_reg_order=2, + ma_order=1, + sample_num=10, + ): + """ Calculate the the ARMA noise of a volume + This calculates the autoregressive and moving average noise of the volume + over time by sampling brain voxels and averaging them. Parameters ---------- - volume : 4d array, float + volume : 4d array or 1d array, float Take a volume time series to extract the middle slice from the - middle TR + middle TR. Can also accept a one dimensional time course (mask input + is then ignored). 
mask : 3d array, binary
         A binary mask the same size as the volume
 
@@ -1196,33 +1198,55 @@
 
     -------
 
     auto_reg_rho : list of floats
-        A rho of the autoregression in the data
+        Rho of a specific order for the autoregression noise in the data
+
+    ma_rho : list of floats
+        Moving average of a specific order for the data
 
     """
 
-    # Calculate the time course of voxels within the brain
-    brain_timecourse = volume[mask > 0]
+    # Pull out the non masked voxels
+    if len(volume.shape) > 1:
+        brain_timecourse = volume[mask > 0]
+    else:
+        # If a 1 dimensional input is supplied then reshape it to make the
+        # timecourse
+        brain_timecourse = volume.reshape(1, len(volume))
 
     # Identify some brain voxels to assess
     voxel_idxs = list(range(brain_timecourse.shape[0]))
     np.random.shuffle(voxel_idxs)
 
+    # If there are more samples than voxels, take all of the voxels
+    if len(voxel_idxs) < sample_num:
+        sample_num = len(voxel_idxs)
+
     auto_reg_rho_all = np.zeros((sample_num, auto_reg_order))
+    ma_all = np.zeros((sample_num, ma_order))
     for voxel_counter in range(sample_num):
 
         # Get the timecourse and demean it
         timecourse = brain_timecourse[voxel_idxs[voxel_counter], :]
         demeaned_timecourse = timecourse - timecourse.mean()
 
-        # Pull out the AR values (depends on order)
-        auto_reg_rho,_ = ar.AR_est_YW(demeaned_timecourse, auto_reg_order)
+        # Pull out the ARMA values (depends on order)
+        try:
+            model = ARMA(demeaned_timecourse, [auto_reg_order, ma_order])
+            model_fit = model.fit(disp=False)
+            params = model_fit.params
+        except:
+            params = np.ones(auto_reg_order + ma_order + 1) * np.nan
 
         # Add to the list
-        auto_reg_rho_all[voxel_counter, :] = auto_reg_rho
+        auto_reg_rho_all[voxel_counter, :] = params[1:auto_reg_order + 1]
+        ma_all[voxel_counter, :] = params[auto_reg_order + 1:]
 
     # Average all of the values and then convert them to a list
-    return np.mean(auto_reg_rho_all, 0).tolist()
+    auto_reg_rho =np.nanmean(auto_reg_rho_all, 0).tolist()
+    ma_rho = np.nanmean(ma_all, 0).tolist()
+ # Return the coefficients + return auto_reg_rho, ma_rho def calc_noise(volume, mask=None, @@ -1268,7 +1292,8 @@ def calc_noise(volume, noise_dict['max_activity'] = np.nanmax(np.mean(volume, 3)) # Calculate the temporal variability of the volume - noise_dict['auto_reg_rho'] = _calc_AR_noise(volume, mask) + noise_dict['auto_reg_rho'], noise_dict['ma_rho'] = _calc_ARMA_noise( + volume, mask) # Set it such that all of the temporal variability will be accounted for # by the AR component @@ -1528,7 +1553,7 @@ def _generate_noise_temporal_drift(trs, def _generate_noise_temporal_autoregression(timepoints, - auto_reg_rho, + noise_dict, dimensions, template, mask, @@ -1537,7 +1562,7 @@ def _generate_noise_temporal_autoregression(timepoints, """Generate the autoregression noise Make a slowly drifting timecourse with the given autoregression - parameters. The output should have an autoregression coefficient of 1 + parameters. This can take in both AR and MA components Parameters ---------- @@ -1545,11 +1570,14 @@ def _generate_noise_temporal_autoregression(timepoints, timepoints : 1 Dimensional array What time points are sampled by a TR - auto_reg_rho : list of floats - What is the scaling factor on the predictiveness of the previous - time point. Values near or greater than one may produce drift or - other unwanted trends. The length of this list determines the order - of the AR function + noise_dict : dict + A dictionary specifying the types of noise in this experiment. The + noise types interact in important ways. First, all noise types + ending with sigma (e.g. motion sigma) are mixed together in + _generate_temporal_noise. The sigma values describe the proportion of + mixing of these elements. However critically, SFNR is the + parameter that describes how much noise these components contribute + to the brain. 
dimensions : 3 length array, int What is the shape of the volume to be generated @@ -1585,48 +1613,70 @@ def _generate_noise_temporal_autoregression(timepoints, """ + # Pull out the relevant noise parameters + auto_reg_rho = noise_dict['auto_reg_rho'] + ma_rho = noise_dict['ma_rho'] + # Specify the order based on the number of rho supplied auto_reg_order = len(auto_reg_rho) + ma_order = len(ma_rho) + + # This code assumes that the AR order is higher than the MA order + if ma_order > auto_reg_order: + err_str = 'MA order (' + str(ma_order) +') is greater than AR order ' \ + '('+ str(auto_reg_order) + \ + '). Cannot run.' + raise ValueError(err_str) # Generate a random variable at each time point that is a decayed value # of the previous time points noise_autoregression = np.zeros((dimensions[0], dimensions[1], dimensions[2], len(timepoints))) + err_vols = np.zeros((dimensions[0], dimensions[1], dimensions[2], + len(timepoints))) for tr_counter in range(len(timepoints)): - # Create a brain shaped volume with similar smoothing properties - volume = _generate_noise_spatial(dimensions=dimensions, + # Create a brain shaped volume with appropriate smoothing properties + noise = _generate_noise_spatial(dimensions=dimensions, template=template, mask=mask, fwhm=fwhm, ) if tr_counter == 0: - noise_autoregression[:, :, :, tr_counter] = volume + noise_autoregression[:, :, :, tr_counter] = noise else: - temp_vol = np.zeros((dimensions[0], dimensions[1], dimensions[ - 2], auto_reg_order + 1)) + # Preset the volume to collect the AR estimated process + AR_vol = np.zeros((dimensions[0], dimensions[1], dimensions[2])) + + # Iterate through both the AR and MA values for pCounter in list(range(1, auto_reg_order + 1)): past_TR = int(tr_counter - pCounter) + if tr_counter - pCounter >= 0: # Pull out a previous TR past_vols = noise_autoregression[:, :, :, past_TR] - # How much to 'discount' this TR - past_reg = auto_reg_rho[pCounter - 1] + # Add the discounted previous volume + AR_vol += 
past_vols * auto_reg_rho[pCounter - 1] + + # If the MA order has at least this many coefficients + # then consider the error terms + if ma_order >= pCounter: + + # Collect the noise from the previous TRs + err_vols[:, :, :, tr_counter] = noise - # Add it to the list of TRs to consider - temp_vol[:, :, :, pCounter] = past_vols * past_reg + # Pull out a previous TR + past_noise = err_vols[:, :, :, past_TR] - noise_autoregression[:, :, :, tr_counter] = np.sum( - temp_vol, 3) + volume + # Add the discounted previous noise + AR_vol += past_noise * ma_rho[pCounter - 1] - # N.B. You don't want to normalize. Although that may make the sigma of - # this timecourse 1, it will change the autoregression coefficient to be - # much lower. + noise_autoregression[:, :, :, tr_counter] = AR_vol + noise return noise_autoregression @@ -1902,8 +1952,7 @@ def _generate_noise_temporal(stimfunction_tr, # Calculate the AR time course volume noise = _generate_noise_temporal_autoregression(timepoints, - noise_dict[ - 'auto_reg_rho'], + noise_dict, dimensions, template, mask, @@ -2108,6 +2157,8 @@ def _noise_dict_update(noise_dict): noise_dict['auto_reg_sigma'] = 0.5 if 'auto_reg_rho' not in noise_dict: noise_dict['auto_reg_rho'] = [0.5] + if 'ma_rho' not in noise_dict: + noise_dict['ma_rho'] = [0] if 'physiological_sigma' not in noise_dict: noise_dict['physiological_sigma'] = 0.1 if 'sfnr' not in noise_dict: @@ -2131,7 +2182,7 @@ def generate_noise(dimensions, mask=None, noise_dict=None, temporal_proportion=0.5, - iterations=10, + iterations=20, ): """ Generate the noise to be added to the signal. 
Default noise parameters will create a noise volume with a standard @@ -2199,15 +2250,6 @@ def generate_noise(dimensions, if mask is None: mask = np.ones(dimensions) - # Generate the noise - noise_temporal = _generate_noise_temporal(stimfunction_tr=stimfunction_tr, - tr_duration=tr_duration, - dimensions=dimensions, - template=template, - mask=mask, - noise_dict=noise_dict, - ) - # Create the base (this inverts the process to make the template) base = template * noise_dict['max_activity'] @@ -2218,6 +2260,15 @@ def generate_noise(dimensions, # What is the mean signal of the non masked voxels in this template? mean_signal = (base[mask > 0]).mean() + # Generate the noise + noise_temporal = _generate_noise_temporal(stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + dimensions=dimensions, + template=template, + mask=mask, + noise_dict=noise_dict, + ) + # Convert SFNR into the size of the standard deviation of temporal # variability temporal_sd = (mean_signal / noise_dict['sfnr']) @@ -2229,7 +2280,7 @@ def generate_noise(dimensions, # What is the standard deviation of the background activity spatial_sd = mean_signal / noise_dict['snr'] - # Cycle through the iterations + # Iterate through different parameters to fit SNR and SFNR spat_sd_orig = np.copy(spatial_sd) temp_sd_orig = np.copy(temporal_sd_system) for iteration in list(range(iterations + 1)): @@ -2249,12 +2300,20 @@ def generate_noise(dimensions, # If there are iterations left to perform then recalculate the # metrics and try again alpha = 0.5 + sfnr_threshold = 1 + snr_threshold = 0.1 if iteration < iterations: # Calculate the new metrics new_sfnr = _calc_sfnr(noise, mask) new_snr = _calc_snr(noise, mask) + # If the AR is sufficiently close then break the loop + if abs(new_sfnr) < sfnr_threshold and abs(new_snr) < snr_threshold: + print('Terminated SNR and SFNR fit after ' + str( + iteration) + ' iterations.') + break + temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * temporal_proportion) 
spat_sd_new = mean_signal / new_snr @@ -2263,6 +2322,56 @@ def generate_noise(dimensions, temporal_sd_system -= ((temp_sd_new - temp_sd_orig) * alpha) spatial_sd -= ((spat_sd_new - spat_sd_orig) * alpha) + # Iterate through different MA parameters to fit AR + for iteration in list(range(iterations + 1)): + + # Generate the noise + noise_temporal = _generate_noise_temporal(stimfunction_tr, + tr_duration, + dimensions, + template, + mask, + noise_dict, + ) + + # Set up the machine noise + noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, + spatial_sd=spatial_sd, + temporal_sd=temporal_sd_system, + ) + + # Sum up the noise of the brain + noise = base + (noise_temporal * (1 - temporal_sd_system)) + \ + noise_system + + # Reject negative values (only happens outside of the brain) + noise[noise < 0] = 0 + + # If there are iterations left to perform then recalculate the + # metrics and try again + alpha = 0.95 + ar_threshold = 0.025 + if iteration < iterations: + + # Calculate the new metrics + auto_reg_rho, _ = _calc_ARMA_noise(noise, + mask, + len(noise_dict['auto_reg_rho']), + len(noise_dict['ma_rho']), + ) + + # Calculate the difference in the first AR component + AR_0_diff = auto_reg_rho[0] - noise_dict['auto_reg_rho'][0] + noise_dict['ma_rho'] = [noise_dict['ma_rho'][0] - (AR_0_diff * + alpha)] + + # If the AR is sufficiently close then break the loop + if abs(AR_0_diff) < ar_threshold: + print('Terminated AR fit after ' + str(iteration) + + ' iterations.') + break + + return noise diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 75c3e8877..db591c4ba 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -219,7 +219,7 @@ def test_generate_noise(): tr_duration=tr_duration, template=template, mask=mask, - noise_dict={'sfnr': 100, 'snr': 100}, + noise_dict={'sfnr': 100, 'snr': 7}, ) noise_low = sim.generate_noise(dimensions=dimensions, @@ -227,7 +227,7 @@ def test_generate_noise(): 
tr_duration=tr_duration, template=template, mask=mask, - noise_dict={'sfnr': 1000, 'snr': 1000}, + noise_dict={'sfnr': 200, 'snr': 10}, ) system_high = np.std(noise_high[mask > 0], 0).mean() @@ -295,8 +295,8 @@ def test_calc_noise(): 'auto_reg_rho': [1.0, -0.5], 'physiological_sigma': 0, 'task_sigma': 0, - 'snr': 30, - 'sfnr': 30, + 'snr': 10, + 'sfnr': 90, 'max_activity': 1000, 'fwhm': 4, } From 3066c7a539f305936279e070813ce2ae4f9f9514 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 20 Feb 2018 09:41:07 -0500 Subject: [PATCH 05/51] Add statsmodels to 'setup.py' --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index a0d883ec9..7cb7e972c 100644 --- a/setup.py +++ b/setup.py @@ -127,6 +127,7 @@ def finalize_options(self): 'numpy', 'scikit-learn[alldeps]>=0.18', 'scipy!=1.0.0', # See https://github.com/scipy/scipy/pull/8082 + 'statsmodels', 'pymanopt', 'theano', 'pybind11>=1.7', From 8cf4fda75e3eccc7ccf196aaef42003fc1f1f289 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 8 Mar 2018 13:27:36 -0500 Subject: [PATCH 06/51] Added signal change function and update notebook script --- brainiak/utils/fmrisim.py | 171 +- examples/utils/fmrisim_example.py | 191 - .../utils/fmrisim_multivariate_example.ipynb | 8644 ++++++++++++++++- 3 files changed, 8540 insertions(+), 466 deletions(-) delete mode 100644 examples/utils/fmrisim_example.py diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 8f2632dce..91f1e23c5 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1557,7 +1557,6 @@ def _generate_noise_temporal_autoregression(timepoints, dimensions, template, mask, - fwhm, ): """Generate the autoregression noise @@ -1589,23 +1588,6 @@ def _generate_noise_temporal_autoregression(timepoints, mask : 3 dimensional array, binary The masked brain, thresholded to distinguish brain and non-brain - fwhm : float - What is the full width half max of the gaussian fields being created. 
- This is converted into a sigma which is used in this function. - However, this conversion was found empirically by testing values of - sigma and how it relates to fwhm values. The relationship that would be - found in such a test depends on the size of the brain (bigger brains - can have bigger fwhm). - However, small errors shouldn't matter too much since the fwhm - generated here can only be approximate anyway: firstly, although the - distribution that is being drawn from is set to this value, - this will manifest differently on every draw. Secondly, because of - the masking and dimensions of the generated volume, this does not - behave simply- wrapping effects matter (the outputs are - closer to the input value if you have no mask). - Use _calc_fwhm on this volume alone if you have concerns about the - accuracy of the fwhm. - Returns ---------- noise_autoregression : one dimensional array, float @@ -1640,7 +1622,7 @@ def _generate_noise_temporal_autoregression(timepoints, noise = _generate_noise_spatial(dimensions=dimensions, template=template, mask=mask, - fwhm=fwhm, + fwhm=noise_dict['fwhm'], ) if tr_counter == 0: @@ -1956,7 +1938,6 @@ def _generate_noise_temporal(stimfunction_tr, dimensions, template, mask, - noise_dict['fwhm'], ) # Combine the volume and noise @@ -2374,6 +2355,156 @@ def generate_noise(dimensions, return noise +def compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + method='PSE', + ): + """ Rescale the current a signal functions based on a metric and + magnitude supplied. Metrics are heavily influenced by Welvaert & Rosseel + (2013). The rescaling is based on the maximal activity in the + timecourse. Importantly, all values within the signal_function are + scaled to have a min of -1 or max of 1 + + Parameters + ---------- + + + signal_function : timepoint by voxel array + The signal time course to be altered. This can have + multiple time courses specified as different columns in this + array. 
Conceivably you could use the output of
+        generate_stimfunction as the input but the temporal variance
+        will be incorrect
+
+    noise_function : timepoint by voxel numpy array
+        The time course of noise (a voxel created from generate_noise)
+        for each voxel specified in signal_function. This is necessary
+        for computing the mean evoked activity and the noise variability
+
+    noise_dict : dict
+        A dictionary specifying the types of noise in this experiment. The
+        noise types interact in important ways. First, all noise types
+        ending with sigma (e.g. motion sigma) are mixed together in
+        _generate_temporal_noise. The sigma values describe the proportion of
+        mixing of these elements. However critically, SFNR is the
+        parameter that describes how much noise these components contribute
+        to the brain.
+
+    magnitude : list of floats
+        This specifies the size, in terms of the metric chosen below,
+        of the signal being generated. This can be a single number,
+        and thus apply to all signal timecourses, or it can be an array and
+        thus different for each voxel. 
+ + method : str + Select the procedure used to calculate the signal magnitude, + some of which are based on the definitions outlined in Welvaert & + Rosseel (2013): + - 'SFNR': Change proportional to the temporal variability, + as represented by the (desired) SFNR + - 'CNR_Amp/Noise-SD': Signal magnitude relative to the temporal + noise + - 'CNR_Amp2/Noise-Var_dB': Same as above but converted to decibels + - 'CNR_Signal-SD/Noise-SD': Standard deviation in signal + relative to standard deviation in noise + - 'CNR_Signal-Var/Noise-Var_dB': Same as above but converted to + decibels + - 'PSE': Calculate the percent signal change based on the + average activity of the noise (mean / 100 * magnitude) + + + Returns + ---------- + signal_function_scaled : 4d numpy array + The new signal volume with the appropriately set signal change + + """ + + # If you have only one magnitude value, duplicate the magnitude for each + # timecourse you have + if len(magnitude) == 1: + magnitude *= signal_function.shape[1] + + # Scale all signals that to have a range of -1 to 1. 
This is + # so that any values less than this will be scaled appropriately + signal_function /= np.max(np.abs(signal_function)) + + # Iterate through the timecourses and calculate the metric + signal_function_scaled = np.zeros(signal_function.shape) + for voxel_counter in range(signal_function.shape[1]): + + # Pull out the values for this voxel + sig_voxel = signal_function[:, voxel_counter] + noise_voxel = noise_function[:, voxel_counter] + magnitude_voxel = magnitude[voxel_counter] + + # Calculate the scaled time course using the specified method + if method == 'SFNR': + + # How much temporal variation is there, relative to the mean + # activity + temporal_var = noise_voxel.mean() / noise_dict['sfnr'] + + # Multiply the timecourse by the variability metric + new_sig = sig_voxel * (temporal_var * magnitude_voxel) + + elif method == 'CNR_Amp/Noise-SD': + + # What is the standard deviation of the noise + noise_std = np.std(noise_voxel) + + # Multiply the signal timecourse by the the CNR and noise ( + # rearranging eq.) + new_sig = sig_voxel * (magnitude_voxel * noise_std) + + elif method == 'CNR_Amp2/Noise-Var_dB': + + # Calculate the current signal amplitude (likely to be 1, + # but not necessarily) + sig_amp = np.max(np.abs(sig_voxel)) + + # What is the standard deviation of the noise + noise_std = np.std(noise_voxel) + + # Rearrange the equation to compute the size of signal change in + # decibels + scale = 10 ** ((magnitude_voxel / sig_amp) + np.log10(noise_std + ** 2)) + new_sig = sig_voxel * np.sqrt(scale) + + elif method == 'CNR_Signal-SD/Noise-SD': + + # What is the standard deviation of the signal and noise + sig_std = np.std(sig_voxel) + noise_std = np.std(noise_voxel) + + # Multiply the signal timecourse by the the CNR and noise ( + # rearranging eq.) 
+ new_sig = sig_voxel * (magnitude_voxel * noise_std / sig_std) + + elif method == 'CNR_Signal-Var/Noise-Var_dB': + # What is the standard deviation of the signal and noise + sig_std = np.std(sig_voxel) + noise_std = np.std(noise_voxel) + + # Rearrange the equation to compute the size of signal change in + # decibels + scale = 10 ** ((magnitude_voxel / sig_std) + np.log10(noise_std + ** 2)) + new_sig = sig_voxel * np.sqrt(scale) + + elif method == 'PSE': + + # What is the average activity divided by percentage + scale = ((noise_voxel.mean() / 100) * magnitude_voxel) + new_sig = sig_voxel * scale + + signal_function_scaled[:, voxel_counter] = new_sig + + # Return the scaled time course + return signal_function_scaled def plot_brain(fig, brain, diff --git a/examples/utils/fmrisim_example.py b/examples/utils/fmrisim_example.py deleted file mode 100644 index 1c4bb194c..000000000 --- a/examples/utils/fmrisim_example.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2016 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""fMRI Simulator example script - -Example script to generate a run of a participant's data. 
This generates -data representing a pair of conditions that are then combined - - Authors: Cameron Ellis (Princeton) 2016 -""" -import logging -import numpy as np -from brainiak.utils import fmrisim as sim -import matplotlib.pyplot as plt -from mpl_toolkits.mplot3d import Axes3D # noqa: F401 -import nibabel - -logger = logging.getLogger(__name__) - -# Inputs for generate_signal -dimensions = np.array([64, 64, 36]) # What is the size of the brain -feature_size = [9, 4, 9, 9] -feature_type = ['loop', 'cube', 'cavity', 'sphere'] -coordinates_A = np.array( - [[32, 32, 18], [26, 32, 18], [32, 26, 18], [32, 32, 12]]) -coordinates_B = np.array( - [[32, 32, 18], [38, 32, 18], [32, 38, 18], [32, 32, 24]]) -signal_magnitude = [1, 0.5, 0.25, -1] # In percent signal change - -# Inputs for generate_stimfunction -onsets_A = [10, 30, 50, 70, 90] -onsets_B = [0, 20, 40, 60, 80] -event_durations = [6] -tr_duration = 2 -temporal_res = 1000.0 # How many elements per second are there -duration = 100 - -# Specify a name to save this generated volume. 
-savename = 'examples/utils/example.nii' - -# Generate a volume representing the location and quality of the signal -volume_signal_A = sim.generate_signal(dimensions=dimensions, - feature_coordinates=coordinates_A, - feature_type=feature_type, - feature_size=feature_size, - signal_magnitude=signal_magnitude, - ) - -volume_signal_B = sim.generate_signal(dimensions=dimensions, - feature_coordinates=coordinates_B, - feature_type=feature_type, - feature_size=feature_size, - signal_magnitude=signal_magnitude, - ) - -# Visualize the signal that was generated for condition A -fig = plt.figure() -sim.plot_brain(fig, - volume_signal_A) -plt.show() - -# Create the time course for the signal to be generated -stimfunction_A = sim.generate_stimfunction(onsets=onsets_A, - event_durations=event_durations, - total_time=duration, - temporal_resolution=temporal_res, - ) - -stimfunction_B = sim.generate_stimfunction(onsets=onsets_B, - event_durations=event_durations, - total_time=duration, - temporal_resolution=temporal_res, - ) - -# Convolve the HRF with the stimulus sequence -signal_function_A = sim.convolve_hrf(stimfunction=stimfunction_A, - tr_duration=tr_duration, - temporal_resolution=temporal_res, - ) - -signal_function_B = sim.convolve_hrf(stimfunction=stimfunction_B, - tr_duration=tr_duration, - temporal_resolution=temporal_res, - ) - -# Multiply the HRF timecourse with the signal -signal_A = sim.apply_signal(signal_function=signal_function_A, - volume_signal=volume_signal_A, - ) - -signal_B = sim.apply_signal(signal_function=signal_function_B, - volume_signal=volume_signal_B, - ) - -# Combine the signals from the two conditions -signal = signal_A + signal_B - -# Combine the stim functions -stimfunction = list(np.add(stimfunction_A, stimfunction_B)) -stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)] - -# Generate the mask of the signal -mask, template = sim.mask_brain(signal, mask_threshold=0.2) - -# Mask the signal to the shape of a brain (attenuates signal 
according to grey -# matter likelihood) -signal *= mask.reshape(dimensions[0], dimensions[1], dimensions[2], 1) - -# Generate original noise dict for comparison later -orig_noise_dict = sim._noise_dict_update({}) - -# Create the noise volumes (using the default parameters -noise = sim.generate_noise(dimensions=dimensions, - stimfunction_tr=stimfunction_tr, - tr_duration=tr_duration, - mask=mask, - template=template, - noise_dict=orig_noise_dict, - ) - -# Standardize the signal activity to make it percent signal change -mean_act = (mask * orig_noise_dict['max_activity']).sum() / (mask > 0).sum() -signal = signal * mean_act / 100 - -# Combine the signal and the noise -brain = signal + noise - -# Display the brain -fig = plt.figure() -for tr_counter in list(range(0, brain.shape[3])): - - # Get the axis to be plotted - ax = sim.plot_brain(fig, - brain[:, :, :, tr_counter], - mask=mask, - percentile=99.9) - - # Wait for an input - logging.info(tr_counter) - plt.pause(0.5) - -# Save the volume -affine_matrix = np.diag([-1, 1, 1, 1]) # LR gets flipped -brain_nifti = nibabel.Nifti1Image(brain, affine_matrix) # Create a nifti brain -nibabel.save(brain_nifti, savename) - -# Load in the test dataset and generate a random volume based on it - -# Pull out the data and associated data -volume = nibabel.load(savename).get_data() -dimensions = volume.shape[0:3] -total_time = volume.shape[3] * tr_duration -stimfunction = sim.generate_stimfunction(onsets=[], - event_durations=[0], - total_time=total_time, - ) -stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)] - -# Calculate the mask -mask, template = sim.mask_brain(volume=volume, - mask_self=True, - ) - -# Calculate the noise parameters -noise_dict = sim.calc_noise(volume=volume, - mask=mask, - ) - -# Create the noise volumes (using the default parameters -noise = sim.generate_noise(dimensions=dimensions, - tr_duration=tr_duration, - stimfunction_tr=stimfunction_tr, - template=template, - mask=mask, - 
noise_dict=noise_dict, - ) - -# Create a nifti brain -brain_noise = nibabel.Nifti1Image(noise, affine_matrix) -nibabel.save(brain_noise, 'examples/utils/example2.nii') # Save diff --git a/examples/utils/fmrisim_multivariate_example.ipynb b/examples/utils/fmrisim_multivariate_example.ipynb index e4ca5d68f..655525843 100644 --- a/examples/utils/fmrisim_multivariate_example.ipynb +++ b/examples/utils/fmrisim_multivariate_example.ipynb @@ -54,6 +54,16 @@ "collapsed": true }, "outputs": [], + "source": [ + "import sys\n", + "sys.path.append('/Users/cellis/Documents/MATLAB/Analysis_BrainIAK/')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%matplotlib notebook\n", "\n", @@ -80,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": { "collapsed": true }, @@ -102,11 +112,17 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(64, 64, 27, 294)\n" + ] + } + ], "source": [ "dim = volume.shape\n", "dimsize = nii.header.get_zooms()\n", @@ -127,7 +143,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "collapsed": true }, @@ -153,11 +169,20 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:473: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n", + " 'available', HessianInversionWarning)\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. 
Check mle_retvals\n", + " \"Check mle_retvals\", ConvergenceWarning)\n" + ] + } + ], "source": [ "noise_dict = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]]}\n", "noise_dict = fmrisim.calc_noise(volume=volume,\n", @@ -168,9 +193,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Noise parameters of the data were estimated as follows:\n", + "SNR: 7.58299572583\n", + "SFNR: 70.7171164885\n", + "FWHM: 5.65905297219\n" + ] + } + ], "source": [ "print('Noise parameters of the data were estimated as follows:')\n", "print('SNR: ' + str(noise_dict['snr']))\n", @@ -183,6 +219,4222 @@ "metadata": {}, "source": [ "### **2. Generate signal**\n", + "fmrisim can generate realistic fMRI noise when supplied with the appropriate inputs. A single function receives these inputs and deals with generating the noise. The necessary inputs are described below; however, the steps performed by this function are also described in detail for clarity." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/scipy/stats/stats.py:2245: RuntimeWarning: invalid value encountered in true_divide\n", + " np.expand_dims(sstd, axis=axis))\n" + ] + } + ], + "source": [ + "noise = fmrisim.generate_noise(dimensions=dim[0:3],\n", + " tr_duration=int(tr),\n", + " stimfunction_tr=[0] * dim[3], \n", + " mask=mask,\n", + " template=template,\n", + " noise_dict=noise_dict,\n", + " iterations=1,\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. 
\" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(-0.5, 63.5, 63.5, -0.5)" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Plot spatial noise\n", + "low_spatial = fmrisim._generate_noise_spatial(dim[0:3],\n", + " fwhm=4.0,\n", + " )\n", + "\n", + "high_spatial = fmrisim._generate_noise_spatial(dim[0:3],\n", + " fwhm=1.0,\n", + " )\n", + "plt.figure()\n", + "plt.subplot(1,2,1)\n", + "plt.title('FWHM = 4.0')\n", + "plt.imshow(low_spatial[:, :, 12])\n", + "plt.axis('off')\n", + "\n", + "plt.subplot(1,2,2)\n", + "plt.title('FWHM = 1.0')\n", + "plt.imshow(high_spatial[:, :, 12])\n", + "plt.axis('off')" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the different types of noise\n", + "total_time = 500\n", + "timepoints = list(range(0, total_time, int(tr)))\n", + "\n", + "drift = fmrisim._generate_noise_temporal_drift(total_time,\n", + " int(tr),\n", + " )\n", + "\n", + "mini_dim = np.array([2, 2, 2])\n", + "autoreg = fmrisim._generate_noise_temporal_autoregression(timepoints,\n", + " noise_dict,\n", + " mini_dim,\n", + " np.ones(mini_dim),\n", + " np.ones(mini_dim),\n", + " )\n", + " \n", + "phys = fmrisim._generate_noise_temporal_phys(timepoints,\n", + " )\n", + "\n", + "stimfunc = np.zeros((int(total_time / tr), 1))\n", + "stimfunc[np.random.randint(0, int(total_time / tr), 50)] = 1\n", + "task = fmrisim._generate_noise_temporal_task(stimfunc,\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 
'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "system = fmrisim._generate_noise_system(dimensions_tr=dim,\n", + " spatial_sd=50,\n", + " temporal_sd=10, \n", + " )\n", + "# Reshape to be voxel by time\n", + "system = system.reshape(dim[0] * dim[1] * dim[2], dim[3])\n", + "\n", + "plt.figure()\n", + "plt.subplot(1, 3, 1)\n", + "plt.hist(system.flatten(), 10)\n", + "plt.xlabel('Activity')\n", + "plt.ylabel('Frequency')\n", + "\n", + "spatial = system[:10000, 100].reshape(100, 100)\n", + "plt.subplot(1, 3, 2)\n", + "plt.imshow(spatial)\n", + "plt.axis('off')\n", + "plt.title('Spatial plane')\n", + "spatial_range = [(spatial).min(), np.percentile(spatial, 95)]\n", + "plt.clim(spatial_range)\n", + "\n", + "temporal = system[:100, :100]\n", + "plt.subplot(1, 3, 3)\n", + "plt.imshow(temporal)\n", + "plt.axis('off')\n", + "plt.title('Temporal plane')\n", + "temporal_range = [(temporal).min(), np.percentile(temporal, 95)]\n", + "plt.clim(temporal_range)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "System noise in 
real data has similar properties" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(-0.5, 63.5, 63.5, -0.5)" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "plt.figure()\n", "plt.imshow(signal_volume[:, :, 24], cmap=plt.cm.gray)\n", @@ -230,59 +5282,829 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "*2.2 Establish effect size*\n", - "\n", - "When specifying the signal we must determine the amount of activity change each voxel undergoes. A useful metric for this is the SFNR value determined from noise calculations because it can be used to estimate the variability in the average voxel. This is set up so that the evoked activity is proportional to the mean activity of a voxel. For a univariate effect, to estimate activity with a Cohen’s d of 1, the size of the change must be equivalent to one standard deviation. For multivariate effects the effect size depends on multiple factors including the number of voxels and conditions. 
Different measures for effect size could also be calculated, such as percent signal change. Note that that this signal change is based on the average voxel. Instead it might be preferable to model signal change based on the mean of each voxel (i.e. the template value)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "effect_size = 1\n", - "temporal_sd = template * noise_dict['max_activity'] / noise_dict['sfnr']\n", - "\n", - "signal_idxs = np.where(signal_volume == 1)\n", - "\n", - "signal_change = np.zeros((int(signal_volume.sum()), 1))\n", - "for idx_counter in list(range(0, int(signal_volume.sum()))):\n", - " x = signal_idxs[0][idx_counter]\n", - " y = signal_idxs[1][idx_counter]\n", - " z = signal_idxs[2][idx_counter]\n", - " signal_change[idx_counter] = effect_size * temporal_sd[x, y, z]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*2.3 Characterize signal for voxels*\n", + "*3.2 Characterize signal for voxels*\n", "\n", "Specify the pattern of activity across a given number of voxels that characterizes each condition. This pattern can simply be random, as is done here, or can be structured, like the position of voxels in high dimensional representation space." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 36, "metadata": { "collapsed": true }, "outputs": [], "source": [ "voxels = feature_size ** 3\n", - "pattern_A = np.random.randn(voxels).reshape((voxels, 1)) * signal_change\n", - "pattern_B = np.random.randn(voxels).reshape((voxels, 1)) * signal_change" + "pattern_A = np.random.rand(voxels).reshape((voxels, 1))\n", + "pattern_B = np.random.rand(voxels).reshape((voxels, 1))" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 37, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. 
\" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Display signal\n", "plt.figure()\n", "\n", - "response = stats.zscore(signal_func[0:100,0])\n", + "response = signal_func[0:100,0] * 2\n", "plt.title('Example event time course and voxel response')\n", - "Event_A = plt.plot(stimfunc_A[0:100, 0], 'r', label='Event_A')\n", - "Event_B = plt.plot(stimfunc_B[0:100, 0], 'g', label='Event_B')\n", + "downsample_A = stimfunc_A[0:int(100*temporal_res * tr):int(temporal_res * tr), 0]\n", + "downsample_B = stimfunc_B[0:int(100*temporal_res * tr):int(temporal_res * tr), 0]\n", + "Event_A = plt.plot(downsample_A, 'r', label='Event_A')\n", + "Event_B = plt.plot(downsample_B, 'g', label='Event_B')\n", "Response = plt.plot(response, 'b', label='Response')\n", - "plt.legend(loc=1)" + "plt.legend(loc=1)\n", + "plt.yticks([],'')\n", + "plt.xlabel('nth TR')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "*2.8 Multiply 
the convolved response with the signal voxels*\n", + "*3.7 Establish signal magnitude*\n", "\n", - "If you have a time course of simulated response for one or more voxels and a three dimensional volume representing voxels that ought to respond to these events then apply_signal will combine these appropriately. This function multiplies each signal voxel in the brain by the convolved event time course. \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "signal = fmrisim.apply_signal(signal_func,\n", - " signal_volume,\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### **3. Add noise to signal**\n", - "fmrisim can generate realistic fMRI noise when supplied with the appropriate inputs. A single function receives these inputs and deals with generating the noise. The necessary inputs are described below; however, the steps performed by this function are also described in detail for clarity.\n" + "When specifying the signal we must determine the amount of activity change each voxel undergoes. fmrisim contains a tool to allow you to choose between a variety of different metrics that you could use to scale the signal. For instance, we can calculate percent signal change (referred to below as PSE) by taking the average activity of voxels in an ROI of the noise volume and multiplying it by a proportion to signal the percentage change that this signal maximally evokes. This metric doesn't take account of the variance in the noise but other metrics available do. The choices that are available for computing this metric are based on Welvaert and Rosseel (2013)." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 54, "metadata": { "collapsed": true }, "outputs": [], "source": [ - "noise = fmrisim.generate_noise(dimensions=dim[0:3],\n", - " tr_duration=int(tr),\n", - " stimfunction_tr=weights_all[::int(tr)], \n", - " mask=mask,\n", - " template=template,\n", - " noise_dict=noise_dict,\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure()\n", - "plt.imshow(noise[:, :, 24, 0], cmap=plt.cm.gray)\n", - "plt.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*3.1 Create temporal noise*\n", + "# Specify the parameters for signal\n", + "signal_method = 'PSE'\n", + "signal_magnitude = [1]\n", "\n", - "The temporal noise of fMRI data is comprised of multiple components: drift, autoregression, task related motion and physiological noise. To estimate drift, a sine wave with a default period of 300s, is used. To estimate drift, cosine basis functions are combined, with longer runs being comprised of more basis functions (Welvaert, et al., 2011). This drift is then multiplied by a three-dimensional volume of Gaussian random fields of a specific FWHM. Autoregression noise is estimated by creating a time course of Gaussian noise values that are weighted by previous values of the time course. This autoregressive time course is multiplied by a brain shaped volume of Gaussian random fields. Physiological noise is modeled by sine waves comprised of heart rate (1.17Hz) and respiration rate (0.2Hz) (Biswal, et al., 1996) with random phase. This time course is also multiplied by brain shaped spatial noise. Finally, task related noise is simulated by adding Gaussian or Rician noise to time points where there are events (according to the event time course) and in turn this is multiplied by a brain shaped spatial noise volume. 
These four noise components are then mixed together in proportion to the size of their corresponding noise values. This aggregated volume is then Z scored and the SFNR is used to estimate the appropriate standard deviation of these values across time. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Plot spatial noise\n", - "low_spatial = fmrisim._generate_noise_spatial(dim[0:3],\n", - " fwhm=4.0,\n", - " )\n", - "\n", - "high_spatial = fmrisim._generate_noise_spatial(dim[0:3],\n", - " fwhm=1.0,\n", - " )\n", - "plt.figure()\n", - "plt.subplot(1,2,1)\n", - "plt.title('Low noise')\n", - "plt.imshow(low_spatial[:, :, 12])\n", - "plt.axis('off')\n", + "# Where in the brain are there stimulus evoked voxels\n", + "signal_idxs = np.where(signal_volume == 1)\n", "\n", - "plt.subplot(1,2,2)\n", - "plt.title('High noise')\n", - "plt.imshow(high_spatial[:, :, 12])\n", - "plt.axis('off')" + "# Pull out the voxels corresponding to the noise volume\n", + "noise_func = noise[signal_idxs[0], signal_idxs[1], signal_idxs[2], :]" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 55, "metadata": {}, "outputs": [], "source": [ - "# Create the different types of noise\n", - "timepoints = list(range(0, total_time, int(tr)))\n", - "\n", - "drift = fmrisim._generate_noise_temporal_drift(total_time,\n", - " int(tr),\n", - " )\n", - "\n", - "autoreg = fmrisim._generate_noise_temporal_autoregression(timepoints,\n", - " )\n", - " \n", - "phys = fmrisim._generate_noise_temporal_phys(timepoints,\n", - " )\n", - "\n", - "task = fmrisim._generate_noise_temporal_task(abs(stimfunc_A[::int(tr)]),\n", - " )\n", - "\n", - "# Plot the different noise types\n", - "plt.figure()\n", - "plt.title('Noise types')\n", - "\n", - "plt.subplot(4, 1, 1)\n", - "plt.plot(drift)\n", - "plt.axis('off')\n", - "plt.xlabel('Drift')\n", - "\n", - "plt.subplot(4, 1, 2)\n", - "plt.plot(autoreg)\n", - "plt.axis('off')\n", - 
"plt.xlabel('Autoregression')\n", - "\n", - "plt.subplot(4, 1, 3)\n", - "plt.plot(phys)\n", - "plt.axis('off')\n", - "plt.xlabel('Physiological')\n", - "\n", - "plt.subplot(4, 1, 4)\n", - "plt.plot(task)\n", - "plt.axis('off')\n", - "plt.xlabel('Task')" + "# Compute the signal appropriate scaled\n", + "signal_func_scaled = fmrisim.compute_signal_change(signal_func,\n", + " noise_func,\n", + " noise_dict,\n", + " magnitude=signal_magnitude,\n", + " method=signal_method,\n", + " )" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "*3.2 Create system noise*\n", - " \n", - "In addition to temporal noise from fluctuations in the scanner there is also machine noise that causes fluctuations in all voxels. When SNR is low, Rician noise is a good estimate of background noise data (Gudbjartsson, & Patz, 1995). From our testing, when SNR is higher than 30 then noise with an exponential distribution better describes the data. The SNR value that is supplied determines the standard deviation of this machine noise.\t" + "*3.8 Multiply the convolved response with the signal voxels*\n", + "\n", + "If you have a time course of simulated response for one or more voxels and a three dimensional volume representing voxels that ought to respond to these events then apply_signal will combine these appropriately. This function multiplies each signal voxel in the brain by the convolved event time course. 
" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, + "execution_count": 56, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ - "system = fmrisim._generate_noise_system(dimensions_tr=dim,\n", - " spatial_sd=1.5,\n", - " temporal_sd=1,\n", - " )\n", - "\n", - "plt.figure()\n", - "plt.subplot(1, 3, 1)\n", - "plt.hist(system.flatten())\n", - "plt.title('Activity distribution')\n", - "plt.xlabel('Activity')\n", - "plt.ylabel('Frequency')\n", - "\n", - "plt.subplot(1, 3, 2)\n", - "plt.imshow(system[:, :, 0, 0])\n", - "plt.axis('off')\n", - "plt.title('Spatial plane')\n", - "plt.clim([system.min(), np.percentile(system, 95)])\n", - "\n", - "plt.subplot(1, 3, 3)\n", - "plt.imshow(system[0, :64, 0, :64])\n", - "plt.axis('off')\n", - "plt.title('Temporal plane')\n", - "plt.clim([system.min(), np.percentile(system, 95)])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*3.3 Combine noise and template*\n", - " \n", - "The template volume is used to estimate the appropriate baseline distribution of MR values. This estimate is then combined with the temporal noise and the system noise to make an estimate of the noise. " + "signal = fmrisim.apply_signal(signal_func_scaled,\n", + " signal_volume,\n", + " )" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "*3.4 Combine signal and noise*\n", + "*3.9 Combine signal and noise*\n", "\n", "Since the brain signal is expected to be small and sparse relative to the noise, it is assumed sufficient to simply add the volume containing signal with the volume modeling noise to make the simulated brain. 
" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, + "execution_count": 57, + "metadata": {}, "outputs": [], "source": [ "brain = signal + noise" @@ -663,10 +7172,33 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, + "execution_count": 58, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([ 31.25925926, 233.17037037, 196.45925926, 220.93333333,\n", + " 159.74814815, 123.03703704, 135.27407407, 184.22222222,\n", + " 74.08888889, 239.28888889, 49.61481481, 190.34074074,\n", + " 12.9037037 , 153.62962963, 257.64444444, 67.97037037,\n", + " 141.39259259, 227.05185185, 37.37777778, 98.56296296,\n", + " 6.78518519, 251.52592593, 110.8 ])" + ] + }, + "execution_count": 58, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "onsets_A / tr" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": {}, "outputs": [], "source": [ "hrf_lag = 4 # Assumed time from stimulus onset to HRF peak\n", @@ -675,8 +7207,8 @@ "lb = (coordinates - ((feature_size - 1) / 2)).astype('int')[0]\n", "ub = (coordinates + ((feature_size - 1) / 2) + 1).astype('int')[0]\n", "\n", - "trials_A = brain[lb[0]:ub[0], lb[1]:ub[1], lb[2]:ub[2], (onsets_A + hrf_lag / tr).astype('int')]\n", - "trials_B = brain[lb[0]:ub[0], lb[1]:ub[1], lb[2]:ub[2], (onsets_B + hrf_lag / tr).astype('int')]\n", + "trials_A = brain[lb[0]:ub[0], lb[1]:ub[1], lb[2]:ub[2], ((onsets_A + hrf_lag) / tr).astype('int')]\n", + "trials_B = brain[lb[0]:ub[0], lb[1]:ub[1], lb[2]:ub[2], ((onsets_B + hrf_lag) / tr).astype('int')]\n", "\n", "trials_A = trials_A.reshape((voxels, trials_A.shape[3]))\n", "trials_B = trials_B.reshape((voxels, trials_B.shape[3]))" @@ -684,9 +7216,809 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 60, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the 
global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 61, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "distance_matrix = sp_distance.squareform(sp_distance.pdist(np.vstack([trials_A.transpose(), trials_B.transpose()])))\n", "\n", @@ -734,9 +8866,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 62, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mean difference between condition A and B: 0.343\n", + "pvalue: 0.842\n" + ] + } + ], "source": [ "mean_difference = (np.mean(trials_A,0) - np.mean(trials_B,0))\n", "ttest = stats.ttest_1samp(mean_difference, 0)\n", @@ -789,17 +8930,10 @@ "\n", "Gudbjartsson, H. and Patz, S. (1995) The Rician distribution of noisy MRI data. Magnetic resonance in medicine 34, 910-914\n", "\n", - "Welvaert, M., et al. (2011) neuRosim: An R package for generating fMRI data. Journal of Statistical Software 44, 1-18\n" + "Welvaert, M., et al. 
(2011) neuRosim: An R package for generating fMRI data. Journal of Statistical Software 44, 1-18\n", + "\n", + "Welvaert, M., & Rosseel, Y. (2013). On the definition of signal-to-noise ratio and contrast-to-noise ratio for fMRI data. PloS one, 8(11), e77089.\n" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [] } ], "metadata": { From c1d57f74999b953aa7db3ada8fc38d7c3afd0159 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 8 Mar 2018 18:24:40 -0500 Subject: [PATCH 07/51] Vectorized GRF calculation for 50x increase in speed (git add brainiak/utils/fmrisim.py) --- brainiak/utils/fmrisim.py | 115 ++++++++++++++++++++++++++-------- brainiak/utils/testing_grf.py | 0 2 files changed, 89 insertions(+), 26 deletions(-) create mode 100644 brainiak/utils/testing_grf.py diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 91f1e23c5..2dedf9309 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -66,6 +66,10 @@ use an MNI grey matter atlas but any image can be supplied to create an estimate. +compute_signal_change +Convert the signal function into useful metric units according to metrics +used by others (Welvaert & Rosseel, 2013) + plot_brain Display the brain, timepoint by timepoint, with above threshold voxels highlighted against the outline of the brain. 
@@ -96,6 +100,7 @@ "calc_noise", "generate_noise", "mask_brain", + "compute_signal_change", "plot_brain", ] @@ -1107,6 +1112,7 @@ def _calc_sfnr(volume, def _calc_snr(volume, mask, + dilation=0, tr=None, remove_baseline=False, ): @@ -1128,6 +1134,10 @@ def _calc_snr(volume, mask : 3d array, binary A binary mask the same size as the volume + dilation : int + How many binary dilations do you want to perform on the mask to + determine the non-brain voxels + tr : int Integer specifying TR to calculate the SNR for @@ -1146,9 +1156,17 @@ def _calc_snr(volume, if tr is None: tr = int(np.ceil(volume.shape[3] / 2)) + # Dilate the mask in order to ensure that non-brain voxels are far from + # the brain + if dilation > 0: + mask_dilated = ndimage.morphology.binary_dilation(mask, + iterations=dilation) + else: + mask_dilated = mask + # Make a matrix of brain and non_brain voxels by time brain_voxels = volume[mask > 0] - nonbrain_voxels = volume[:, :, :, tr][mask == 0] + nonbrain_voxels = volume[:, :, :, tr][mask_dilated == 0] # Find the mean of the non_brain voxels (deals with structure that may @@ -1158,7 +1176,7 @@ def _calc_snr(volume, # Find the standard deviation of the voxels if remove_baseline == True: # exist outside of the mask) - nonbrain_voxels_mean = np.mean(volume[mask == 0], 1) + nonbrain_voxels_mean = np.mean(volume[mask_dilated == 0], 1) std_voxels = np.nanstd(nonbrain_voxels - nonbrain_voxels_mean) else: std_voxels = np.nanstd(nonbrain_voxels) @@ -1763,10 +1781,12 @@ def _generate_noise_spatial(dimensions, """ + # Check the input is correct if len(dimensions) == 4: - return + raise IndexError('4 dimensions have been supplied, only using 3') + dimensions = dimensions[0:3] - def logfunc(x, a, b, c): + def _logfunc(x, a, b, c): """Solve for y given x for log function. 
Parameters @@ -1792,37 +1812,80 @@ def logfunc(x, a, b, c): """ return (np.log(x + a) / np.log(b)) + c + def _fftIndgen(n): + """# Specify the fft coefficents + + Parameters + ---------- + + n : int + Dim size to estimate over + + Returns + ---------- + + array of ints + fft indexes + """ + + # Pull out the ascending and descending indexes + ascending = np.linspace(0, int(n / 2), int(n / 2 + 1)) + descending = np.linspace(-int(n / 2 - 1), -1, int(n / 2 - 1)) + + return np.concatenate((ascending, descending)) + + def _Pk2(idxs, sigma): + """# Specify the amplitude given the fft coefficents + + Parameters + ---------- + + idxs : 3 by voxel array int + fft indexes + + sigma : float + spatial sigma + + Returns + ---------- + + amplitude : 3 by voxel array + amplitude of the fft coefficients + """ + + # The first set of idxs ought to be zero so make the first value zero to + # avoid a divide by zero error + amp_start = np.array((0)) + + # Compute the amplitude of the function for a series of indices + amp_end = np.sqrt(np.sqrt(np.sum(idxs[:, 1:] ** 2, 0)) ** (-1 * sigma)) + amplitude = np.append(amp_start, amp_end) + + # Return the output + return amplitude + # Convert from fwhm to sigma (relationship discovered empirical, only an # approximation up to sigma = 0 -> 5 which corresponds to fwhm = 0 -> 8, # relies on an assumption of brain size). 
- spatial_sigma = logfunc(fwhm, -0.36778719, 2.10601011, 2.15439247) + spatial_sigma = _logfunc(fwhm, -0.36778719, 2.10601011, 2.15439247) - # Set up the input to the fast fourier transform - def fftIndgen(n): - a = list(range(0, int(n / 2 + 1))) - b = list(range(1, int(n / 2))) - b.reverse() - b = [-i for i in b] - return a + b + noise = np.fft.fftn(np.random.normal(size=dimensions)) - # Take in an array of fft values and determine the amplitude for those - # values - def Pk2(idxs): + # Create a meshgrid of the object + fft_vol = np.meshgrid(_fftIndgen(dimensions[0]), _fftIndgen(dimensions[1]), + _fftIndgen(dimensions[2])) - # If all the indexes are zero then set the out put to zero - if np.all(idxs == 0): - return 0.0 - return np.sqrt(np.sqrt(np.sum(idxs ** 2)) ** (-1 * spatial_sigma)) + # Reshape the data into a vector + fft_vec = np.asarray((fft_vol[0].flatten(), fft_vol[1].flatten(), fft_vol[ + 2].flatten())) - noise = np.fft.fftn(np.random.normal(size=dimensions)) - amplitude = np.zeros(dimensions) + # Compute the amplitude for each element in the grid + amp_vec = _Pk2(fft_vec, spatial_sigma) - for x, fft_x in enumerate(fftIndgen(dimensions[0])): - for y, fft_y in enumerate(fftIndgen(dimensions[1])): - for z, fft_z in enumerate(fftIndgen(dimensions[2])): - amplitude[x, y, z] = Pk2(np.array([fft_x, fft_y, fft_z])) + # Reshape to be a brain volume + amplitude = amp_vec.reshape(dimensions) - # The output + # Inverse FFT of the noise plus amplitude noise_spatial = np.fft.ifftn(noise * amplitude) # Mask or not, then z score diff --git a/brainiak/utils/testing_grf.py b/brainiak/utils/testing_grf.py new file mode 100644 index 000000000..e69de29bb From 98e3877b1677c9456efbbec701f1c6628f3db87d Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 8 Mar 2018 18:25:27 -0500 Subject: [PATCH 08/51] Unintentional add --- brainiak/utils/testing_grf.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 brainiak/utils/testing_grf.py diff --git 
a/brainiak/utils/testing_grf.py b/brainiak/utils/testing_grf.py deleted file mode 100644 index e69de29bb..000000000 From 76209c8cfac1f7264a29d5f8c6e673b123db3576 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 8 Mar 2018 19:43:36 -0500 Subject: [PATCH 09/51] Did some iteration timing testing add added content --- brainiak/utils/fmrisim.py | 28 ++- .../utils/fmrisim_multivariate_example.ipynb | 208 +++++++++--------- 2 files changed, 122 insertions(+), 114 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 2dedf9309..8d4c66d7b 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1830,7 +1830,8 @@ def _fftIndgen(n): # Pull out the ascending and descending indexes ascending = np.linspace(0, int(n / 2), int(n / 2 + 1)) - descending = np.linspace(-int(n / 2 - 1), -1, int(n / 2 - 1)) + elements = int(np.ceil(n / 2 - 1)) # Round up so that len(output)==n + descending = np.linspace(-elements, -1, elements) return np.concatenate((ascending, descending)) @@ -2226,7 +2227,7 @@ def generate_noise(dimensions, mask=None, noise_dict=None, temporal_proportion=0.5, - iterations=20, + iterations=[20, 5], ): """ Generate the noise to be added to the signal. Default noise parameters will create a noise volume with a standard @@ -2265,9 +2266,12 @@ def generate_noise(dimensions, system noise, if it is low then all of the temporal variability is due to brain variability. - iterations : int - How many steps of fitting the SFNR and SNR values will be performed. - Usually converges after < 10. + iterations : list, int + The first element is how many steps of fitting the SFNR and SNR values + will be performed. Usually converges after < 10. The second element + is the number of iterations for the AR fitting. This is much more + time consuming (has to make a new timecourse on each iteration) so + be careful about setting this appropriately. 
Returns ---------- @@ -2327,7 +2331,7 @@ def generate_noise(dimensions, # Iterate through different parameters to fit SNR and SFNR spat_sd_orig = np.copy(spatial_sd) temp_sd_orig = np.copy(temporal_sd_system) - for iteration in list(range(iterations + 1)): + for iteration in list(range(iterations[0] + 1)): # Set up the machine noise noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, spatial_sd=spatial_sd, @@ -2346,7 +2350,7 @@ def generate_noise(dimensions, alpha = 0.5 sfnr_threshold = 1 snr_threshold = 0.1 - if iteration < iterations: + if iteration < iterations[0]: # Calculate the new metrics new_sfnr = _calc_sfnr(noise, mask) @@ -2367,7 +2371,7 @@ def generate_noise(dimensions, spatial_sd -= ((spat_sd_new - spat_sd_orig) * alpha) # Iterate through different MA parameters to fit AR - for iteration in list(range(iterations + 1)): + for iteration in list(range(iterations[1] + 1)): # Generate the noise noise_temporal = _generate_noise_temporal(stimfunction_tr, @@ -2395,7 +2399,7 @@ def generate_noise(dimensions, # metrics and try again alpha = 0.95 ar_threshold = 0.025 - if iteration < iterations: + if iteration < iterations[1]: # Calculate the new metrics auto_reg_rho, _ = _calc_ARMA_noise(noise, @@ -2422,7 +2426,7 @@ def compute_signal_change(signal_function, noise_function, noise_dict, magnitude, - method='PSE', + method='PSC', ): """ Rescale the current a signal functions based on a metric and magnitude supplied. 
Metrics are heavily influenced by Welvaert & Rosseel @@ -2474,7 +2478,7 @@ def compute_signal_change(signal_function, relative to standard deviation in noise - 'CNR_Signal-Var/Noise-Var_dB': Same as above but converted to decibels - - 'PSE': Calculate the percent signal change based on the + - 'PSC': Calculate the percent signal change based on the average activity of the noise (mean / 100 * magnitude) @@ -2558,7 +2562,7 @@ def compute_signal_change(signal_function, ** 2)) new_sig = sig_voxel * np.sqrt(scale) - elif method == 'PSE': + elif method == 'PSC': # What is the average activity divided by percentage scale = ((noise_voxel.mean() / 100) * magnitude_voxel) diff --git a/examples/utils/fmrisim_multivariate_example.ipynb b/examples/utils/fmrisim_multivariate_example.ipynb index 655525843..bb4d6fa6e 100644 --- a/examples/utils/fmrisim_multivariate_example.ipynb +++ b/examples/utils/fmrisim_multivariate_example.ipynb @@ -49,7 +49,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "collapsed": true }, @@ -61,9 +61,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. 
Please use the pandas.tseries module instead.\n", + " from pandas.core import datetools\n" + ] + } + ], "source": [ "%matplotlib notebook\n", "\n", @@ -171,18 +180,7 @@ "cell_type": "code", "execution_count": 6, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:473: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n", - " 'available', HessianInversionWarning)\n", - "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n", - " \"Check mle_retvals\", ConvergenceWarning)\n" - ] - } - ], + "outputs": [], "source": [ "noise_dict = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]]}\n", "noise_dict = fmrisim.calc_noise(volume=volume,\n", @@ -203,7 +201,7 @@ "Noise parameters of the data were estimated as follows:\n", "SNR: 7.58299572583\n", "SFNR: 70.7171164885\n", - "FWHM: 5.65905297219\n" + "FWHM: 5.66380810443\n" ] } ], @@ -224,7 +222,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -243,13 +241,13 @@ " mask=mask,\n", " template=template,\n", " noise_dict=noise_dict,\n", - " iterations=1,\n", + " iterations=[50,0],\n", " )" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -1032,7 +1030,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -1047,7 +1045,7 @@ "(-0.5, 63.5, 63.5, -0.5)" ] }, - "execution_count": 9, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -1069,7 +1067,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 122, "metadata": {}, "outputs": [ { @@ -1852,7 +1850,7 @@ { "data": { "text/html": [ - "
" + "" ], "text/plain": [ "" @@ -1867,7 +1865,7 @@ "(-0.5, 63.5, 63.5, -0.5)" ] }, - "execution_count": 10, + "execution_count": 122, "metadata": {}, "output_type": "execute_result" } @@ -1895,8 +1893,10 @@ }, { "cell_type": "code", - "execution_count": 30, - "metadata": {}, + "execution_count": 24, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "# Create the different types of noise\n", @@ -1926,7 +1926,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 25, "metadata": {}, "outputs": [ { @@ -2709,7 +2709,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -2721,10 +2721,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 31, + "execution_count": 25, "metadata": {}, "output_type": "execute_result" } @@ -2766,7 +2766,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -3549,7 +3549,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -3599,7 +3599,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 28, "metadata": {}, "outputs": [ { @@ -4382,7 +4382,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -4430,6 +4430,19 @@ "The template volume is used to estimate the appropriate baseline distribution of MR values. This estimate is then combined with the temporal noise and the system noise to make an estimate of the noise. " ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*2.4 Fit the data to the noise parameters*\n", + "\n", + "The generate_noise function does its best to estimate the appropriate noise parameters using; however, because of the complexity of these different noise types, it is often wrong. To compensate, fitting is performed in which parameters involved in the noise generation process are changed and the noise metrics are recalculated to see whether those changes helped the accuracy of the fit. 
Due to their importance, the parameters that can be fit are SNR, SFNR and AR.\n", + "\n", + "The fitting of SNR/SFNR involves reweighting spatial and temporal metrics of noise. This analysis is relatively quick because this reweighting does not require that any timecourses are recreated, only that they are reweighted. Some iterations are suggested by default because the initial guesses tend to underestimate SFNR and SNR. Contrast this with fitting the AR. In this case the MA rho is adjusted until the AR is appropriate and in doing so the timecourse needs to be recreated for each iteration. Iterations are only suggested if you wish to match your output data to the input data, by default these values are in an appropriate range.\n", + "\n", + "In terms of timing, for a medium size dataset (64x64x27x294 voxels) it takes approximately 23s to generate the data with 0 iterations on a Mac 2014 laptop. For every iteration of fitting the SNR/SFNR, it takes an additional 3s and for every additional iteration of fitting AR it takes an additional 10s (these combine linearly)." 
+ ] + }, { "cell_type": "markdown", "metadata": {}, @@ -4450,7 +4463,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 29, "metadata": { "collapsed": true }, @@ -4468,7 +4481,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -5251,7 +5264,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -5266,7 +5279,7 @@ "(-0.5, 63.5, 63.5, -0.5)" ] }, - "execution_count": 35, + "execution_count": 30, "metadata": {}, "output_type": "execute_result" } @@ -5289,7 +5302,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 31, "metadata": { "collapsed": true }, @@ -5302,7 +5315,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 32, "metadata": {}, "outputs": [ { @@ -6085,7 +6098,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -6097,10 +6110,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 37, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } @@ -6129,7 +6142,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 33, "metadata": { "collapsed": true }, @@ -6150,7 +6163,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 34, "metadata": { "collapsed": true }, @@ -6180,7 +6193,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 35, "metadata": { "collapsed": true }, @@ -6214,7 +6227,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 36, "metadata": { "collapsed": true }, @@ -6237,8 +6250,10 @@ }, { "cell_type": "code", - "execution_count": 52, - "metadata": {}, + "execution_count": 37, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "signal_func = fmrisim.convolve_hrf(stimfunction=weights_all,\n", @@ -6250,7 +6265,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 38, "metadata": {}, "outputs": [ { @@ -7033,7 +7048,7 @@ { 
"data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -7045,10 +7060,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 53, + "execution_count": 38, "metadata": {}, "output_type": "execute_result" } @@ -7075,20 +7090,20 @@ "source": [ "*3.7 Establish signal magnitude*\n", "\n", - "When specifying the signal we must determine the amount of activity change each voxel undergoes. fmrisim contains a tool to allow you to choose between a variety of different metrics that you could use to scale the signal. For instance, we can calculate percent signal change (referred to below as PSE) by taking the average activity of voxels in an ROI of the noise volume and multiplying it by a proportion to signal the percentage change that this signal maximally evokes. This metric doesn't take account of the variance in the noise but other metrics available do. The choices that are available for computing this metric are based on Welvaert and Rosseel (2013)." + "When specifying the signal we must determine the amount of activity change each voxel undergoes. fmrisim contains a tool to allow you to choose between a variety of different metrics that you could use to scale the signal. For instance, we can calculate percent signal change (referred to as PSC) by taking the average activity of voxels in an ROI of the noise volume and multiplying it by a proportion to signal the percentage change that this signal maximally evokes. This metric doesn't take account of the variance in the noise but other metrics available do. One metric that does take account of variance, and is used below, is the signal amplitude divided by the temporal variability. The choices that are available for computing this metric are based on Welvaert and Rosseel (2013)." 
] }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 113, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Specify the parameters for signal\n", - "signal_method = 'PSE'\n", - "signal_magnitude = [1]\n", + "signal_method = 'CNR_Amp/Noise-SD'\n", + "signal_magnitude = [0.5]\n", "\n", "# Where in the brain are there stimulus evoked voxels\n", "signal_idxs = np.where(signal_volume == 1)\n", @@ -7099,8 +7114,10 @@ }, { "cell_type": "code", - "execution_count": 55, - "metadata": {}, + "execution_count": 114, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "# Compute the signal appropriate scaled\n", @@ -7123,7 +7140,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 115, "metadata": { "collapsed": true }, @@ -7145,8 +7162,10 @@ }, { "cell_type": "code", - "execution_count": 57, - "metadata": {}, + "execution_count": 116, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "brain = signal + noise" @@ -7172,33 +7191,10 @@ }, { "cell_type": "code", - "execution_count": 58, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([ 31.25925926, 233.17037037, 196.45925926, 220.93333333,\n", - " 159.74814815, 123.03703704, 135.27407407, 184.22222222,\n", - " 74.08888889, 239.28888889, 49.61481481, 190.34074074,\n", - " 12.9037037 , 153.62962963, 257.64444444, 67.97037037,\n", - " 141.39259259, 227.05185185, 37.37777778, 98.56296296,\n", - " 6.78518519, 251.52592593, 110.8 ])" - ] - }, - "execution_count": 58, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "onsets_A / tr" - ] - }, - { - "cell_type": "code", - "execution_count": 59, - "metadata": {}, + "execution_count": 117, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "hrf_lag = 4 # Assumed time from stimulus onset to HRF peak\n", @@ -7216,7 +7212,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 118, "metadata": {}, "outputs": [ { 
@@ -7999,7 +7995,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -8011,10 +8007,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 60, + "execution_count": 118, "metadata": {}, "output_type": "execute_result" } @@ -8041,7 +8037,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": 119, "metadata": {}, "outputs": [ { @@ -8824,7 +8820,7 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" @@ -8836,10 +8832,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 61, + "execution_count": 119, "metadata": {}, "output_type": "execute_result" } @@ -8866,15 +8862,15 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 120, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Mean difference between condition A and B: 0.343\n", - "pvalue: 0.842\n" + "Mean difference between condition A and B: -0.49\n", + "pvalue: 0.813\n" ] } ], @@ -8897,9 +8893,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 121, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Classification accuracy between condition A and B: 0.9\n" + ] + } + ], "source": [ "input_mat = np.vstack([trials_A.transpose(), trials_B.transpose()])\n", "input_labels = trials_A.shape[1] * [1] + trials_B.shape[1] * [0]\n", From fda9f9e920b58fb172b6b30015de0e5f7234418d Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 8 Mar 2018 20:58:03 -0500 Subject: [PATCH 10/51] Tweaks of notebook and defaults in fmrisim --- brainiak/utils/fmrisim.py | 6 +- .../utils/fmrisim_multivariate_example.ipynb | 8274 +---------------- 2 files changed, 142 insertions(+), 8138 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 8d4c66d7b..c0a88b541 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2207,11 +2207,11 @@ def _noise_dict_update(noise_dict): if 
'physiological_sigma' not in noise_dict: noise_dict['physiological_sigma'] = 0.1 if 'sfnr' not in noise_dict: - noise_dict['sfnr'] = 30 + noise_dict['sfnr'] = 80 if 'snr' not in noise_dict: - noise_dict['snr'] = 30 + noise_dict['snr'] = 7 if 'max_activity' not in noise_dict: - noise_dict['max_activity'] = 1000 + noise_dict['max_activity'] = 1500 if 'voxel_size' not in noise_dict: noise_dict['voxel_size'] = [1.0, 1.0, 1.0] if 'fwhm' not in noise_dict: diff --git a/examples/utils/fmrisim_multivariate_example.ipynb b/examples/utils/fmrisim_multivariate_example.ipynb index bb4d6fa6e..221739219 100644 --- a/examples/utils/fmrisim_multivariate_example.ipynb +++ b/examples/utils/fmrisim_multivariate_example.ipynb @@ -49,30 +49,9 @@ }, { "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "import sys\n", - "sys.path.append('/Users/cellis/Documents/MATLAB/Analysis_BrainIAK/')" - ] - }, - { - "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.\n", - " from pandas.core import datetools\n" - ] - } - ], + "outputs": [], "source": [ "%matplotlib notebook\n", "\n", @@ -99,7 +78,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "collapsed": true }, @@ -116,25 +95,17 @@ "source": [ "*1.2\tSpecify participant dimensions and resolution*\n", "\n", - "It is possible to manually specify all parameters necessary for fmrisim. However, it is also possible to instead provide an fMRI dataset as input and extract the necessary parameters from that dataset. Such an example is described below and will be followed throughout. 
Here the size of the volume and the resolution of the voxels within it are determined." + "The size of the volume and the resolution of the voxels must be specified (or extracted from the real data as is the case below)." ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(64, 64, 27, 294)\n" - ] - } - ], + "outputs": [], "source": [ - "dim = volume.shape\n", - "dimsize = nii.header.get_zooms()\n", + "dim = volume.shape # What is the size of the volume\n", + "dimsize = nii.header.get_zooms() # Get voxel dimensions from the nifti header\n", "tr = dimsize[3]\n", "if tr > 100: # If high then these values are likely in ms\n", " tr /= 1000\n", @@ -147,20 +118,20 @@ "source": [ "*1.3 Generate an activity template and a mask*\n", "\n", - "Functions in fmrisim require a continuous map that describes the appropriate average MR value for each voxel in the brain and a mask which specifies voxels in the brain versus voxels outside of the brain. One way to generate both of these volumes is the mask_brain function. At a minimum, this takes as an input the fMRI volume to be simulated. To create the template this volume is averaged over time and bounded to a range from 0 to 1. In other words, voxels with a high value in the template have high activity over time. To create a mask, the template is thresholded. This threshold can be set manually or instead an appropriate value can be determined by looking for the minima between the two first peaks in the histogram of voxel values.\n" + "Functions in fmrisim require a continuous map that describes the appropriate average MR value for each voxel in the brain and a mask which specifies voxels in the brain versus voxels outside of the brain. One way to generate both of these volumes is the mask_brain function. At a minimum, this takes as an input the fMRI volume to be simulated. 
To create the template this volume is averaged over time and bounded to a range from 0 to 1. In other words, voxels with a high value in the template have high activity over time. To create a mask, the template is thresholded. This threshold can be set manually or instead an appropriate value can be determined by looking for the minima between the two first peaks in the histogram of voxel values. If you would prefer, you could use the [compute_epi_mask](http://nilearn.github.io/modules/generated/nilearn.masking.compute_epi_mask.html) function in nilearn which uses a similar method." ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "mask, template = fmrisim.mask_brain(volume=volume, \n", - " mask_self=True,\n", - " )" + " mask_self=True,\n", + " )" ] }, { @@ -169,19 +140,20 @@ "source": [ "*1.4 Determine noise parameters*\n", "\n", - "A critical step in the fmrisim toolbox is determining the noise parameters of the volume to be created. Many noise parameters are available for specification and if any are not set then they will default to reasonable values. As before, it is instead possible to provide raw fMRI data that will be used to estimate these noise parameters. The goal of the noise estimation is to calculate general descriptive statistics about the noise in the brain that are thought to be important. The simulations are thought to be useful for understanding how signals will survive analyses when embedded in realistic neural noise. \n", + "A critical step in the fmrisim toolbox is determining the noise parameters of the volume to be created. Many noise parameters are available for specification and if any are not set then they will default to reasonable values. As mentioned before, it is instead possible to provide raw fMRI data that will be used to estimate these noise parameters. 
The goal of the noise estimation is to calculate general descriptive statistics about the noise in the brain that are thought to be important. The simulations are then useful for understanding how signals will survive analyses when embedded in realistic neural noise. \n", "\n", - "Now the disclaimers: the values here are only an estimate and will depend on noise properties combining in the ways specified. In addition, because of the non-linearity and stochasticity of this simulation, this estimation is not fully invertible: if you generate a dataset with a set of noise parameters it will have similar but not the same noise parameters as a result. Moreover, complex interactions between brain regions that likely better describe brain noise are not modelled here: this toolbox pays no attention to regions of the brain or their interactions. Finally, for best results use raw fMRI because if the data has been preprocessed then assumptions this algorithm makes are likely to be erroneous. For instance, if the brain has been masked then this will eliminate variance in non-brain voxels which will mean that calculations of noise dependent on those voxels as a reference will fail.\n", + "Now the disclaimers: the values here are only an estimate and will depend on noise properties combining in the ways assumed. In addition, because of the non-linearity and stochasticity of this simulation, this estimation is not fully invertible: if you generate a dataset with a set of noise parameters it will have similar but not the same noise parameters as a result. Moreover, complex interactions between brain regions that likely better describe brain noise are not modelled here: this toolbox pays no attention to regions of the brain or their interactions. Finally, for best results use raw fMRI because if the data has been preprocessed then assumptions this algorithm makes are likely to be erroneous. 
For instance, if the brain has been masked then this will eliminate variance in non-brain voxels which will mean that calculations of noise dependent on those voxels as a reference will fail.\n", "\n", - "This toolbox separates noise in two: spatial noise and temporal noise. To estimate spatial noise both the smoothness and the amount of non-brain noise of the data must be quantified. For smoothness, the Full Width Half Max (FWHM) of the volume is averaged for the X, Y and Z dimension and then averaged across a sample of time points. To calculate the Signal to Noise Ratio the mean activity in brain voxels for the middle time point is divided by the standard deviation in activity across non-brain voxels for that time point. For temporal noise the drift, temporal autocorrelation, and functional variability is estimated. The drift is estimated by averaging all non-brain voxels and looking at the variance in this average across time. This time course is also used to estimate the temporal autoregression by taking the first slope coefficient of an autoregression estimation function from the Nitime package . The Signal to Fluctuation Noise Ratio is calculated by dividing the average activity of voxels in the brain with that voxel’s noise (Friedman & Glover, 2006). That noise is calculated by taking the standard deviation of that voxel over time after it has been detrended with a second order polynomial. Other types of noise can be generated, such as physiological noise, but are not estimated by this function.\n" + "This toolbox separates noise in two: spatial noise and temporal noise. To estimate spatial noise both the smoothness and the amount of non-brain noise of the data must be quantified. For smoothness, the Full Width Half Max (FWHM) of the volume is averaged for the X, Y and Z dimension and then averaged across a sample of time points. 
To calculate the Signal to Noise Ratio (SNR) the mean activity in brain voxels for the middle time point is divided by the standard deviation in activity across non-brain voxels for that time point. For temporal noise an Auto-regressive and moving average (ARMA) process is estimated, along with the overall size of temporal variability. A sample of brain voxels is used to estimate the first two AR components and the first MA component of each voxel's activity over time using the statsmodels package. The Signal to Fluctuation Noise Ratio (SFNR) is calculated by dividing the average activity of voxels in the brain with that voxel’s noise (Friedman & Glover, 2006). That noise is calculated by taking the standard deviation of that voxel over time after it has been detrended with a second order polynomial. The SFNR then controls the amount of functional variability. Other types of noise can be generated, such as physiological noise, but are not estimated by this function.\n" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "# Calculate the noise parameters from the data\n", "noise_dict = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]]}\n", "noise_dict = fmrisim.calc_noise(volume=volume,\n", " mask=mask,\n", @@ -191,20 +163,9 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Noise parameters of the data were estimated as follows:\n", - "SNR: 7.58299572583\n", - "SFNR: 70.7171164885\n", - "FWHM: 5.66380810443\n" - ] - } - ], + "outputs": [], "source": [ "print('Noise parameters of the data were estimated as follows:')\n", "print('SNR: ' + str(noise_dict['snr']))\n", @@ -222,19 +183,11 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - 
"/Users/cellis/anaconda/lib/python3.6/site-packages/scipy/stats/stats.py:2245: RuntimeWarning: invalid value encountered in true_divide\n", - " np.expand_dims(sstd, axis=axis))\n" - ] - } - ], + "outputs": [], "source": [ + "# Calculate the noise given the parameters\n", "noise = fmrisim.generate_noise(dimensions=dim[0:3],\n", " tr_duration=int(tr),\n", " stimfunction_tr=[0] * dim[3], \n", @@ -247,812 +200,13 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. 
\" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "(-0.5, 63.5, 63.5, -0.5)" - ] - }, - "execution_count": 122, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Plot spatial noise\n", "low_spatial = fmrisim._generate_noise_spatial(dim[0:3],\n", @@ -1893,7 +247,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": { "collapsed": true }, @@ -1926,809 +280,9 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "system = fmrisim._generate_noise_system(dimensions_tr=dim,\n", " spatial_sd=50,\n", @@ -3599,799 +363,9 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "(-0.5, 63.5, 63.5, -0.5)" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "plt.figure()\n", - "plt.imshow(signal_volume[:, :, 24], cmap=plt.cm.gray)\n", - "plt.imshow(mask[:, :, 24], cmap=plt.cm.gray, alpha=.5)\n", + "plt.imshow(signal_volume[:, :, 21], cmap=plt.cm.gray)\n", + "plt.imshow(mask[:, :, 21], cmap=plt.cm.gray, alpha=.5)\n", "plt.axis('off')" ] }, @@ -5302,829 +507,34 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ + "# Create a pattern for each voxel in our signal ROI\n", "voxels = feature_size ** 3\n", - "pattern_A = np.random.rand(voxels).reshape((voxels, 1))\n", + "\n", + "# Pull the conical voxel activity from a uniform distribution\n", + "pattern_A = np.random.rand(voxels).reshape((voxels, 1)) \n", "pattern_B = np.random.rand(voxels).reshape((voxels, 1))" ] }, { "cell_type": "code", - 
"execution_count": 32, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 38, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "# Display signal\n", - "plt.figure()\n", - "\n", + "# Prepare the data to be plotted\n", "response = signal_func[0:100,0] * 2\n", - "plt.title('Example event time course and voxel response')\n", "downsample_A = stimfunc_A[0:int(100*temporal_res * tr):int(temporal_res * tr), 0]\n", "downsample_B = stimfunc_B[0:int(100*temporal_res * tr):int(temporal_res * tr), 0]\n", + "\n", + "# Display signal\n", + "plt.figure()\n", + "plt.title('Example event time course and voxel response')\n", "Event_A = plt.plot(downsample_A, 'r', label='Event_A')\n", "Event_B = plt.plot(downsample_B, 'g', label='Event_B')\n", "Response = plt.plot(response, 'b', label='Response')\n", @@ -7090,12 +706,12 @@ "source": [ "*3.7 Establish signal magnitude*\n", "\n", - "When specifying the signal we must determine the amount of activity change each voxel undergoes. 
fmrisim contains a tool to allow you to choose between a variety of different metrics that you could use to scale the signal. For instance, we can calculate percent signal change (referred to as PSC) by taking the average activity of voxels in an ROI of the noise volume and multiplying it by a proportion to signal the percentage change that this signal maximally evokes. This metric doesn't take account of the variance in the noise but other metrics available do. One metric that does take account of variance, and is used below, is the signal amplitude divided by the temporal variability. The choices that are available for computing this metric are based on Welvaert and Rosseel (2013)." + "When specifying the signal we must determine the amount of activity change each voxel undergoes. fmrisim contains a tool to allow you to choose between a variety of different metrics that you could use to scale the signal. For instance, we can calculate percent signal change (referred to as PSC) by taking the average activity of voxels in an ROI of the noise volume and multiplying it by a proportion to signal the percentage change that this signal maximally evokes. This metric doesn't take account of the variance in the noise but other metrics available do. One metric that does take account of variance, and is used below, is the signal amplitude divided by the temporal variability. The choices that are available for computing the signal scale are based on Welvaert and Rosseel (2013)." 
] }, { "cell_type": "code", - "execution_count": 113, + "execution_count": null, "metadata": { "collapsed": true }, @@ -7114,7 +730,7 @@ }, { "cell_type": "code", - "execution_count": 114, + "execution_count": null, "metadata": { "collapsed": true }, @@ -7140,7 +756,7 @@ }, { "cell_type": "code", - "execution_count": 115, + "execution_count": null, "metadata": { "collapsed": true }, @@ -7162,7 +778,7 @@ }, { "cell_type": "code", - "execution_count": 116, + "execution_count": null, "metadata": { "collapsed": true }, @@ -7191,7 +807,7 @@ }, { "cell_type": "code", - "execution_count": 117, + "execution_count": null, "metadata": { "collapsed": true }, @@ -7203,819 +819,22 @@ "lb = (coordinates - ((feature_size - 1) / 2)).astype('int')[0]\n", "ub = (coordinates + ((feature_size - 1) / 2) + 1).astype('int')[0]\n", "\n", + "# Pull out voxels in the ROI for the specified timepoints\n", "trials_A = brain[lb[0]:ub[0], lb[1]:ub[1], lb[2]:ub[2], ((onsets_A + hrf_lag) / tr).astype('int')]\n", "trials_B = brain[lb[0]:ub[0], lb[1]:ub[1], lb[2]:ub[2], ((onsets_B + hrf_lag) / tr).astype('int')]\n", "\n", + "# Reshape data for easy handling\n", "trials_A = trials_A.reshape((voxels, trials_A.shape[3]))\n", "trials_B = trials_B.reshape((voxels, trials_B.shape[3]))" ] }, { "cell_type": "code", - "execution_count": 118, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 119, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ + "# Calculate the distance matrix between trial types\n", "distance_matrix = sp_distance.squareform(sp_distance.pdist(np.vstack([trials_A.transpose(), trials_B.transpose()])))\n", "\n", "mds = manifold.MDS(n_components=2, dissimilarity='precomputed') # Fit the mds object\n", "coords = mds.fit(distance_matrix).embedding_ # Find the mds coordinates\n", + "\n", + "# Plot the data\n", "plt.figure()\n", "plt.scatter(coords[:, 0], coords[:, 1], c=['red'] * trials_A.shape[1] + ['green'] * trials_B.shape[1])\n", "plt.axis('off')\n", @@ -8862,24 +884,14 @@ }, { "cell_type": "code", - "execution_count": 120, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Mean difference between condition A and B: -0.49\n", - "pvalue: 0.813\n" - ] - } - ], + "outputs": [], "source": [ "mean_difference 
= (np.mean(trials_A,0) - np.mean(trials_B,0))\n",
    "ttest = stats.ttest_1samp(mean_difference, 0)\n",
    "\n",
-    "print('Mean difference between condition A and B: ' + str(mean_difference.mean())[0:5])\n",
-    "print('pvalue: '+ str(ttest.pvalue)[0:5])"
+    "print('Mean difference between condition A and B: %0.2f\\np value: %0.3f' % (mean_difference.mean(), ttest.pvalue))"
   ]
  },
  {
@@ -8893,22 +905,14 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 121,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Classification accuracy between condition A and B: 0.9\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
+    "# Get the inputs to the SVM\n",
    "input_mat = np.vstack([trials_A.transpose(), trials_B.transpose()])\n",
    "input_labels = trials_A.shape[1] * [1] + trials_B.shape[1] * [0]\n",
    "\n",
-    "\n",
    "# Set up the classifier\n",
    "X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(\n",
    "    input_mat, input_labels, test_size=0.2, random_state=0)\n",
@@ -8916,7 +920,7 @@
    "clf = sklearn.svm.SVC(kernel='linear', C=1).fit(X_train, y_train)\n",
    "\n",
    "score = clf.score(X_test, y_test)\n",
-    "print('Classification accuracy between condition A and B: ' + str(score)[0:5])"
+    "print('Classification accuracy between condition A and B: %0.3f' % score)"
   ]
  },
  {

From 4c3472cc07a26ee8746f2fd36c1ff4117c91a274 Mon Sep 17 00:00:00 2001
From: CameronTEllis
Date: Mon, 12 Mar 2018 16:49:38 -0400
Subject: [PATCH 11/51] Improved the fitting procedure, especially for AR

---
 brainiak/utils/fmrisim.py | 45 +++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py
index c0a88b541..3a722b4c8 100644
--- a/brainiak/utils/fmrisim.py
+++ b/brainiak/utils/fmrisim.py
@@ -1189,7 +1189,7 @@ def _calc_ARMA_noise(volume,
                      mask,
                      auto_reg_order=2,
                      ma_order=1,
-                     sample_num=10,
+                     sample_num=100,
                      ):
     """ Calculate the the ARMA noise of a 
volume This calculates the autoregressive and moving average noise of the volume @@ -2347,17 +2347,22 @@ def generate_noise(dimensions, # If there are iterations left to perform then recalculate the # metrics and try again - alpha = 0.5 - sfnr_threshold = 1 - snr_threshold = 0.1 + fit_delta = 0.95 + fit_thresh = 0.05 + target_sfnr = noise_dict['sfnr'] + target_snr = noise_dict['snr'] if iteration < iterations[0]: # Calculate the new metrics new_sfnr = _calc_sfnr(noise, mask) new_snr = _calc_snr(noise, mask) + # Calculate the difference between the real and simulated data + diff_sfnr = abs(new_sfnr - target_sfnr) / target_sfnr + diff_snr = abs(new_snr - target_snr) / target_snr + # If the AR is sufficiently close then break the loop - if abs(new_sfnr) < sfnr_threshold and abs(new_snr) < snr_threshold: + if diff_sfnr < fit_thresh and diff_snr < fit_thresh: print('Terminated SNR and SFNR fit after ' + str( iteration) + ' iterations.') break @@ -2367,10 +2372,11 @@ def generate_noise(dimensions, spat_sd_new = mean_signal / new_snr # Update the variables - temporal_sd_system -= ((temp_sd_new - temp_sd_orig) * alpha) - spatial_sd -= ((spat_sd_new - spat_sd_orig) * alpha) + temporal_sd_system -= ((temp_sd_new - temp_sd_orig) * fit_delta) + spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta) # Iterate through different MA parameters to fit AR + print('Target AR: %0.3f\n' % noise_dict['auto_reg_rho'][0]) for iteration in list(range(iterations[1] + 1)): # Generate the noise @@ -2397,27 +2403,30 @@ def generate_noise(dimensions, # If there are iterations left to perform then recalculate the # metrics and try again - alpha = 0.95 - ar_threshold = 0.025 + target_ar = noise_dict['auto_reg_rho'] if iteration < iterations[1]: # Calculate the new metrics - auto_reg_rho, _ = _calc_ARMA_noise(noise, - mask, - len(noise_dict['auto_reg_rho']), - len(noise_dict['ma_rho']), - ) + new_ar, _ = _calc_ARMA_noise(noise, + mask, + len(noise_dict['auto_reg_rho']), + 
len(noise_dict['ma_rho']), + ) # Calculate the difference in the first AR component - AR_0_diff = auto_reg_rho[0] - noise_dict['auto_reg_rho'][0] - noise_dict['ma_rho'] = [noise_dict['ma_rho'][0] - (AR_0_diff * - alpha)] + ar_0_diff = abs(new_ar[0] - target_ar[0]) / target_ar[0] + + print('New AR: %0.3f\n' % new_ar[0]) # If the AR is sufficiently close then break the loop - if abs(AR_0_diff) < ar_threshold: + if ar_0_diff < fit_thresh: print('Terminated AR fit after ' + str(iteration) + ' iterations.') break + else: + # Otherwise update the ma coefficient + noise_dict['ma_rho'] = [noise_dict['ma_rho'][0] - (ar_0_diff * + fit_delta)] return noise From 07d24256070c7060088a7ffc5d0ca84f843582ba Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 13 Mar 2018 21:58:14 -0400 Subject: [PATCH 12/51] Updated how fitting is done and how machine noise is calculated --- brainiak/utils/fmrisim.py | 434 +- .../utils/fmrisim_multivariate_example.ipynb | 7543 ++++++++++++++++- tests/utils/test_fmrisim.py | 40 +- 3 files changed, 7733 insertions(+), 284 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 3a722b4c8..d0ea6a5eb 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1112,9 +1112,9 @@ def _calc_sfnr(volume, def _calc_snr(volume, mask, - dilation=0, + dilation=5, tr=None, - remove_baseline=False, + template_baseline=None, ): """ Calculate the the SNR of a volume Calculates the Signal to Noise Ratio, the mean of brain voxels @@ -1136,13 +1136,24 @@ def _calc_snr(volume, dilation : int How many binary dilations do you want to perform on the mask to - determine the non-brain voxels + determine the non-brain voxels. If you increase this the SNR + increases and the non-brain voxels (after baseline subtraction) more + closely resemble a gaussian tr : int Integer specifying TR to calculate the SNR for - remove_baseline : bool - Is the baseline (a.k.a. temporal mean activation) removed. 
+ template_baseline : 3d array, float + Do you want to subtract the baseline (a.k.a. temporal mean + activation, the template), to observe the noise in addition to this + in peripherial regions? Using this procedure means that only noise + that is not average is modeled. It is particularly valuable for the + simulator because the machine noise is added to the template. + However, the noise profile is different when you subtract (it + becomes somewhat gaussian). Moreover, the amount of noise + perturbation reflects the baseline value (voxels that are high on + average across time have more noise than voxels that have low + averages), If this is 'None' then this subtraction won't occur. Returns ------- @@ -1164,22 +1175,22 @@ def _calc_snr(volume, else: mask_dilated = mask - # Make a matrix of brain and non_brain voxels by time - brain_voxels = volume[mask > 0] - nonbrain_voxels = volume[:, :, :, tr][mask_dilated == 0] + # Make a matrix of brain and non_brain voxels, selecting the timepoint + brain_voxels = volume[mask > 0][:, tr] + nonbrain_voxels = (volume[:, :, :, tr]).astype('float64') + + # Do you want to remove the average of the periphery (removes + # structure, leaving only variability) + if template_baseline is not None: + nonbrain_voxels -= template_baseline - # Find the mean of the non_brain voxels (deals with structure that may + nonbrain_voxels = nonbrain_voxels[mask_dilated == 0] # Take the means of each voxel over time mean_voxels = np.nanmean(brain_voxels) # Find the standard deviation of the voxels - if remove_baseline == True: - # exist outside of the mask) - nonbrain_voxels_mean = np.mean(volume[mask_dilated == 0], 1) - std_voxels = np.nanstd(nonbrain_voxels - nonbrain_voxels_mean) - else: - std_voxels = np.nanstd(nonbrain_voxels) + std_voxels = np.nanstd(nonbrain_voxels) # Return the snr return mean_voxels / std_voxels @@ -1267,7 +1278,8 @@ def _calc_ARMA_noise(volume, return auto_reg_rho, ma_rho def calc_noise(volume, - mask=None, + mask, + 
template, noise_dict=None, ): """ Calculates the noise properties of the volume supplied. @@ -1285,7 +1297,13 @@ def calc_noise(volume, mask : 3d numpy array, binary A binary mask of the brain, the same size as the volume + template : 3d array, float + A continuous (0 -> 1) volume describing the likelihood a voxel is in + the brain. This can be used to contrast the brain and non brain. + noise_dict : dict + The initialized dictionary of the calculated noise parameters of the + provided dataset (usually it is only the voxel size) Returns ------- @@ -1347,6 +1365,7 @@ def calc_noise(volume, noise_dict['fwhm'] = np.mean(fwhm) noise_dict['snr'] = _calc_snr(volume, mask, + template_baseline=template, ) # Return the noise dictionary @@ -1356,15 +1375,28 @@ def calc_noise(volume, def _generate_noise_system(dimensions_tr, spatial_sd, temporal_sd, - spatial_noise_type='exponential', - temporal_noise_type='exponential', + spatial_noise_type='gaussian', + temporal_noise_type='gaussian', ): """Generate the scanner noise - Generate system noise, either rician or exponential, for the scanner. - Low SNR scans tend to have rician noise whereas high SNR scans (>30) are - better modelled by exponential noise. Generates a distribution with a SD - of 1. + Generate system noise, either rician, gaussian or exponential, for the + scanner. Generates a distribution with a SD of 1. If you look at the + distribution of non-brain voxel intensity in modern scans you will see + it is rician. However, depending on how you have calculated the SNR and + whether the template is being used you want to use this function + differently: the voxels in the non-brain are stable over time and + usually reflect structure in the MR signal (e.g. the + baseline MR of the head coil or skull). Hence the template captures this + rician noise structure. If you are adding the machine noise to the + template, as is done in generate_noise, then you are likely doubling up + on the addition of machine noise. 
To correct for this you can instead + calculate the SNR with the baseline removed (give template_baseline the + template, which is the default) and then create machine noise as a + gaussian around this baseline. The residual noise is approximately + gaussian in regions far from the brain but becomes much more kurtotic + towards the brain centre, which is captured when you combine the + baseline with the gaussian. Parameters ---------- @@ -1374,8 +1406,11 @@ def _generate_noise_system(dimensions_tr, noise into. This can be a volume of any size noise_type : str - String specifying the noise type. Rician is appropriate when the SNR is - low but is insufficiently skewed to appropriately model high SNR data. + String specifying the noise type. If you aren't specifying the noise + template then Rician is the appropriate model of noise. However, + if you are subtracting the template, as is default, then you should + use gaussian. (If the dilation parameter of _calc_snr is <10 then + gaussian is only an approximation) Returns ---------- @@ -1384,9 +1419,9 @@ def _generate_noise_system(dimensions_tr, Create a volume with system noise """ - def generate_noise_volume(dimensions, - noise_type, - ): + def noise_volume(dimensions, + noise_type, + ): if noise_type == 'rician': # Generate the Rician noise (has an SD of 1) @@ -1407,8 +1442,8 @@ def generate_noise_volume(dimensions, 1]) # Generate noise - spatial_noise = generate_noise_volume(dimensions, spatial_noise_type) - temporal_noise = generate_noise_volume(dimensions_tr, temporal_noise_type) + spatial_noise = noise_volume(dimensions, spatial_noise_type) + temporal_noise = noise_volume(dimensions_tr, temporal_noise_type) # Since you are combining spatial and temporal noise, you need to # subtract the variance of the two to get the spatial sd @@ -1418,15 +1453,9 @@ def generate_noise_volume(dimensions, # If this is below zero then all the noise will be temporal spatial_sd = 0 - # # Mean centre, while preserving the SD - # 
spatial_noise = spatial_noise - spatial_noise.mean() - # Make the system noise have a specific spatial variability spatial_noise *= spatial_sd - # # Mean centre, while preserving the SD - # temporal_noise = temporal_noise - temporal_noise.mean() - # Set the size of the noise temporal_noise *= temporal_sd @@ -1438,7 +1467,7 @@ def generate_noise_volume(dimensions, 1) temporal_noise = temporal_noise - (temporal_noise_mean - spatial_noise) - # Save the size of the noise + # Save the combination system_noise = spatial_noise + temporal_noise return system_noise @@ -2197,21 +2226,21 @@ def _noise_dict_update(noise_dict): if 'task_sigma' not in noise_dict: noise_dict['task_sigma'] = 0 if 'drift_sigma' not in noise_dict: - noise_dict['drift_sigma'] = 0.45 + noise_dict['drift_sigma'] = 0 if 'auto_reg_sigma' not in noise_dict: - noise_dict['auto_reg_sigma'] = 0.5 + noise_dict['auto_reg_sigma'] = 1 if 'auto_reg_rho' not in noise_dict: - noise_dict['auto_reg_rho'] = [0.5] + noise_dict['auto_reg_rho'] = [1.0, -0.5] if 'ma_rho' not in noise_dict: noise_dict['ma_rho'] = [0] if 'physiological_sigma' not in noise_dict: - noise_dict['physiological_sigma'] = 0.1 + noise_dict['physiological_sigma'] = 0 if 'sfnr' not in noise_dict: - noise_dict['sfnr'] = 80 + noise_dict['sfnr'] = 90 if 'snr' not in noise_dict: - noise_dict['snr'] = 7 + noise_dict['snr'] = 25 if 'max_activity' not in noise_dict: - noise_dict['max_activity'] = 1500 + noise_dict['max_activity'] = 1000 if 'voxel_size' not in noise_dict: noise_dict['voxel_size'] = [1.0, 1.0, 1.0] if 'fwhm' not in noise_dict: @@ -2219,6 +2248,167 @@ def _noise_dict_update(noise_dict): return noise_dict +def _fit_snr_sfnr(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd, + temporal_proportion, + noise_dict, + fit_thresh, + fit_delta, + iterations, + ): + """ + Fit the noise model to match the SNR and SFNR of the data + + Parameters + ---------- + + noise_dict : dict + A dictionary specifying the types of noise in this 
experiment. The + noise types interact in important ways. First, all noise types + ending with sigma (e.g. motion sigma) are mixed together in + _generate_temporal_noise. These values describe the proportion of + mixing of these elements. However critically, SFNR is the + parameter that describes how much noise these components contribute + to the brain. + + Returns + ------- + + noise : multidimensional array, float + Generates the noise volume given these parameters + + """ + + # Pull out information that is needed + dim_tr = noise.shape + base = template * noise_dict['max_activity'] + base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1) + mean_signal = (base[mask > 0]).mean() + target_sfnr = noise_dict['sfnr'] + target_snr = noise_dict['snr'] + + # Iterate through different parameters to fit SNR and SFNR + spat_sd_orig = np.copy(spatial_sd) + temp_sd_orig = np.copy(temporal_sd) + for iteration in list(range(iterations)): + + # Calculate the new metrics + new_sfnr = _calc_sfnr(noise, mask) + new_snr = _calc_snr(noise, mask, template_baseline=template) + + # Calculate the difference between the real and simulated data + diff_sfnr = abs(new_sfnr - target_sfnr) / target_sfnr + diff_snr = abs(new_snr - target_snr) / target_snr + + # If the AR is sufficiently close then break the loop + if diff_sfnr < fit_thresh and diff_snr < fit_thresh: + print('Terminated SNR and SFNR fit after ' + str( + iteration) + ' iterations.') + break + + # Convert the SFNR and SNR + temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * + temporal_proportion) + spat_sd_new = mean_signal / new_snr + + # Update the variables + temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) + spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta) + + # Prevent these going out of range + if temporal_sd < 0 or np.isnan(temporal_sd): + temporal_sd = 10e-3 + if spatial_sd < 0 or np.isnan(spatial_sd): + spatial_sd = 10e-3 + + # Set up the machine noise + noise_system = 
_generate_noise_system(dimensions_tr=dim_tr, + spatial_sd=spatial_sd, + temporal_sd=temporal_sd, + ) + + # Sum up the noise of the brain + noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system + + # Reject negative values (only happens outside of the brain) + noise[noise < 0] = 0 + + # Return the updated noise + return noise, spatial_sd, temporal_sd + +def _fit_ar(noise, + mask, + template, + stimfunction_tr, + tr_duration, + spatial_sd, + temporal_sd, + noise_dict, + fit_thresh, + fit_delta, + iterations, + ): + + # Pull out the + dim_tr = noise.shape + dim = dim_tr[0:3] + base = template * noise_dict['max_activity'] + base = base.reshape(dim[0], dim[1], dim[2], 1) + + # Iterate through different MA parameters to fit AR + for iteration in list(range(iterations)): + + # If there are iterations left to perform then recalculate the + # metrics and try again + target_ar = noise_dict['auto_reg_rho'] + + # Calculate the new metrics + new_ar, _ = _calc_ARMA_noise(noise, + mask, + len(noise_dict['auto_reg_rho']), + len(noise_dict['ma_rho']), + ) + + # Calculate the difference in the first AR component + ar_0_diff = abs(new_ar[0] - target_ar[0]) / target_ar[0] + + # If the AR is sufficiently close then break the loop + if ar_0_diff < fit_thresh: + print('Terminated AR fit after ' + str(iteration) + + ' iterations.') + break + else: + # Otherwise update the ma coefficient + noise_dict['ma_rho'] = [noise_dict['ma_rho'][0] - (ar_0_diff * + fit_delta)] + + # Generate the noise. 
The appropriate + noise_temporal = _generate_noise_temporal(stimfunction_tr, + tr_duration, + dim, + template, + mask, + noise_dict, + ) + + # Set up the machine noise + noise_system = _generate_noise_system(dimensions_tr=dim_tr, + spatial_sd=spatial_sd, + temporal_sd=temporal_sd, + ) + + # Sum up the noise of the brain + noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system + + # Reject negative values (only happens outside of the brain) + noise[noise < 0] = 0 + + # Return the updated noise + return noise def generate_noise(dimensions, stimfunction_tr, @@ -2227,7 +2417,9 @@ def generate_noise(dimensions, mask=None, noise_dict=None, temporal_proportion=0.5, - iterations=[20, 5], + iterations=[5, 20], + fit_thresh=0.05, + fit_delta=0.5, ): """ Generate the noise to be added to the signal. Default noise parameters will create a noise volume with a standard @@ -2268,11 +2460,21 @@ def generate_noise(dimensions, iterations : list, int The first element is how many steps of fitting the SFNR and SNR values - will be performed. Usually converges after < 10. The second element + will be performed. Usually converges after < 5. The second element is the number of iterations for the AR fitting. This is much more time consuming (has to make a new timecourse on each iteration) so be careful about setting this appropriately. + fit_thresh : float + What proportion of the target parameter value is sufficient error to + warrant finishing fit search. 
+ + fit_delta : float + How much are the parameters attenuated during the fitting process, + in terms of the proportion of difference between the target + parameter and the actual parameter + + Returns ---------- @@ -2328,107 +2530,49 @@ def generate_noise(dimensions, # What is the standard deviation of the background activity spatial_sd = mean_signal / noise_dict['snr'] - # Iterate through different parameters to fit SNR and SFNR - spat_sd_orig = np.copy(spatial_sd) - temp_sd_orig = np.copy(temporal_sd_system) - for iteration in list(range(iterations[0] + 1)): - # Set up the machine noise - noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, - spatial_sd=spatial_sd, - temporal_sd=temporal_sd_system, - ) - - # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd_system)) + \ - noise_system - - # Reject negative values (only happens outside of the brain) - noise[noise < 0] = 0 - - # If there are iterations left to perform then recalculate the - # metrics and try again - fit_delta = 0.95 - fit_thresh = 0.05 - target_sfnr = noise_dict['sfnr'] - target_snr = noise_dict['snr'] - if iteration < iterations[0]: - - # Calculate the new metrics - new_sfnr = _calc_sfnr(noise, mask) - new_snr = _calc_snr(noise, mask) - - # Calculate the difference between the real and simulated data - diff_sfnr = abs(new_sfnr - target_sfnr) / target_sfnr - diff_snr = abs(new_snr - target_snr) / target_snr - - # If the AR is sufficiently close then break the loop - if diff_sfnr < fit_thresh and diff_snr < fit_thresh: - print('Terminated SNR and SFNR fit after ' + str( - iteration) + ' iterations.') - break - - temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * - temporal_proportion) - spat_sd_new = mean_signal / new_snr - - # Update the variables - temporal_sd_system -= ((temp_sd_new - temp_sd_orig) * fit_delta) - spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta) - - # Iterate through different MA parameters to fit AR - print('Target AR: 
%0.3f\n' % noise_dict['auto_reg_rho'][0]) - for iteration in list(range(iterations[1] + 1)): - - # Generate the noise - noise_temporal = _generate_noise_temporal(stimfunction_tr, - tr_duration, - dimensions, - template, - mask, - noise_dict, - ) - - # Set up the machine noise - noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, - spatial_sd=spatial_sd, - temporal_sd=temporal_sd_system, - ) - - # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd_system)) + \ - noise_system - - # Reject negative values (only happens outside of the brain) - noise[noise < 0] = 0 - - # If there are iterations left to perform then recalculate the - # metrics and try again - target_ar = noise_dict['auto_reg_rho'] - if iteration < iterations[1]: - - # Calculate the new metrics - new_ar, _ = _calc_ARMA_noise(noise, - mask, - len(noise_dict['auto_reg_rho']), - len(noise_dict['ma_rho']), - ) - - # Calculate the difference in the first AR component - ar_0_diff = abs(new_ar[0] - target_ar[0]) / target_ar[0] - - print('New AR: %0.3f\n' % new_ar[0]) - - # If the AR is sufficiently close then break the loop - if ar_0_diff < fit_thresh: - print('Terminated AR fit after ' + str(iteration) + - ' iterations.') - break - else: - # Otherwise update the ma coefficient - noise_dict['ma_rho'] = [noise_dict['ma_rho'][0] - (ar_0_diff * - fit_delta)] - + # Set up the machine noise + noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, + spatial_sd=spatial_sd, + temporal_sd=temporal_sd_system, + ) + + # Sum up the noise of the brain + noise = base + (noise_temporal * (1 - temporal_sd_system)) + \ + noise_system + + # Reject negative values (only happens outside of the brain) + noise[noise < 0] = 0 + + # Fit the SNR and SFNR + noise, spatial_sd, temporal_sd_system = _fit_snr_sfnr(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd_system, + temporal_proportion, + noise_dict, + fit_thresh, + fit_delta, + iterations[0], + ) + + # 
Fit the AR + noise = _fit_ar(noise, + mask, + template, + stimfunction_tr, + tr_duration, + spatial_sd, + temporal_sd, + noise_dict, + fit_thresh, + fit_delta, + iterations[1], + ) + + # Return the noise return noise def compute_signal_change(signal_function, diff --git a/examples/utils/fmrisim_multivariate_example.ipynb b/examples/utils/fmrisim_multivariate_example.ipynb index 221739219..9b9e613f2 100644 --- a/examples/utils/fmrisim_multivariate_example.ipynb +++ b/examples/utils/fmrisim_multivariate_example.ipynb @@ -49,9 +49,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.\n", + " from pandas.core import datetools\n" + ] + } + ], "source": [ "%matplotlib notebook\n", "\n", @@ -60,6 +69,7 @@ "import nibabel\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", + "import scipy.ndimage as ndimage\n", "import scipy.spatial.distance as sp_distance\n", "import sklearn.manifold as manifold\n", "import scipy.stats as stats\n", @@ -67,6 +77,26 @@ "import sklearn.svm" ] }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import sys;" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "sys.path.append('/Users/cellis/Documents/MATLAB/Analysis_BrainIAK/')" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -78,7 +108,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": { "collapsed": true }, @@ -100,9 +130,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, - 
"outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(64, 64, 27, 294)\n" + ] + } + ], "source": [ "dim = volume.shape # What is the size of the volume\n", "dimsize = nii.header.get_zooms() # Get voxel dimensions from the nifti header\n", @@ -123,7 +161,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": { "collapsed": true }, @@ -149,23 +187,60 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:473: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n", + " 'available', HessianInversionWarning)\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n", + " \"Check mle_retvals\", ConvergenceWarning)\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:584: RuntimeWarning: overflow encountered in exp\n", + " newparams = ((1-np.exp(-params))/\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:585: RuntimeWarning: overflow encountered in exp\n", + " (1+np.exp(-params))).copy()\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:585: RuntimeWarning: invalid value encountered in true_divide\n", + " (1+np.exp(-params))).copy()\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:586: RuntimeWarning: overflow encountered in exp\n", + " tmp = ((1-np.exp(-params))/\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:587: RuntimeWarning: overflow encountered in exp\n", + " (1+np.exp(-params))).copy()\n", + 
"/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:587: RuntimeWarning: invalid value encountered in true_divide\n", + " (1+np.exp(-params))).copy()\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n", + " \"Check mle_retvals\", ConvergenceWarning)\n" + ] + } + ], "source": [ "# Calculate the noise parameters from the data\n", "noise_dict = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]]}\n", "noise_dict = fmrisim.calc_noise(volume=volume,\n", " mask=mask,\n", + " template=template,\n", " noise_dict=noise_dict,\n", " )" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Noise parameters of the data were estimated as follows:\n", + "SNR: 22.8951160877\n", + "SFNR: 70.7171164885\n", + "FWHM: 5.65314304468\n" + ] + } + ], "source": [ "print('Noise parameters of the data were estimated as follows:')\n", "print('SNR: ' + str(noise_dict['snr']))\n", @@ -183,9 +258,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/scipy/stats/stats.py:2245: RuntimeWarning: invalid value encountered in true_divide\n", + " np.expand_dims(sstd, axis=axis))\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Terminated SNR and SFNR fit after 7 iterations.\n" + ] + } + ], "source": [ "# Calculate the noise given the parameters\n", "noise = fmrisim.generate_noise(dimensions=dim[0:3],\n", @@ -200,9 +291,809 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + 
"application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(-0.5, 63.5, 63.5, -0.5)" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Plot spatial noise\n", "low_spatial = fmrisim._generate_noise_spatial(dim[0:3],\n", @@ -247,7 +1938,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": { "collapsed": true }, @@ -280,9 +1971,809 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "# Remove all non brain voxels\n", - "system = volume[mask == 0]\n", + "# Dilate the mask so as to only take voxels far from the brain (performed in calc_noise)\n", + "mask_dilated = ndimage.morphology.binary_dilation(mask, iterations=10)\n", "\n", - "idxs = list(range(system.shape[0]))\n", - "np.random.shuffle(idxs)\n", + "# Remove all non brain voxels\n", + "system_all = volume[mask_dilated == 0] # Pull out all the non brain voxels in the first TR\n", + "system_baseline = volume - (template.reshape(dim[0], dim[1], dim[2], 1) * noise_dict['max_activity']) # Subtract the baseline before masking\n", + "system_baseline = system_baseline[mask_dilated == 0]\n", "\n", + "# Plot the distribution of voxels\n", "plt.figure()\n", "plt.subplot(1, 3, 1)\n", - "plt.hist(system.flatten(),10)\n", - "plt.ylim(0, 2.5e7)\n", + "plt.hist(system_all[:,0].flatten(),100)\n", 
"plt.xlabel('Activity')\n", "plt.ylabel('Frequency')\n", "\n", - "spatial = system[idxs[:10000], 0].reshape(100, 100)\n", - "plt.subplot(1, 3, 2)\n", - "plt.imshow(spatial)\n", - "plt.axis('off')\n", - "plt.title('Spatial plane')\n", - "plt.clim(spatial_range)\n", + "# Identify a subset of voxels to plot\n", + "idxs = list(range(system_all.shape[0]))\n", + "np.random.shuffle(idxs)\n", "\n", - "temporal = system[idxs[:100], :100]\n", - "plt.subplot(1, 3, 3)\n", + "temporal = system_all[idxs[:100], :100]\n", + "plt.subplot(1, 3, 2)\n", "plt.imshow(temporal)\n", "plt.axis('off')\n", - "plt.title('Temporal plane')\n", - "plt.clim(temporal_range)" + "plt.title('Temporal')\n", + "\n", + "# Plot the difference\n", + "ax=plt.subplot(1, 3, 3)\n", + "plt.hist(system_baseline[:,0].flatten(),100)\n", + "ax.yaxis.tick_right()\n", + "ax.yaxis.set_label_position(\"right\")\n", + "plt.xlabel('Activity difference')" ] }, { @@ -419,7 +3674,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": { "collapsed": true }, @@ -429,22 +3684,49 @@ "noise_dict_sim = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]]}\n", "noise_dict_sim = fmrisim.calc_noise(volume=noise,\n", " mask=mask,\n", + " template=template,\n", " noise_dict=noise_dict_sim,\n", " )" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SNR: 22.90 vs 21.93\n" + ] + } + ], + "source": [ + "print('SNR: %0.2f vs %0.2f' % (noise_dict['snr'], noise_dict_sim['snr']))" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Compare noise parameters for the real and simulated noise:\n", + "SNR: 22.90 vs 21.93\n", + "SFNR: 70.72 vs 70.75\n", + "FWHM: 5.65 vs 5.71\n" + ] + } + ], "source": [ "print('Compare 
noise parameters for the real and simulated noise:')\n", - "print('SNR: %s vs %s' % str(noise_dict['snr']), str(noise_dict_sim['snr']))\n", - "print('SFNR: %s vs %s' % str(noise_dict['sfnr']), str(noise_dict_sim['sfnr']))\n", - "print('FWHM: %s vs %s' % str(noise_dict['fwhm']), str(noise_dict_sim['fwhm']))" + "print('SNR: %0.2f vs %0.2f' % (noise_dict['snr'], noise_dict_sim['snr']))\n", + "print('SFNR: %0.2f vs %0.2f' % (noise_dict['sfnr'], noise_dict_sim['sfnr']))\n", + "print('FWHM: %0.2f vs %0.2f' % (noise_dict['fwhm'], noise_dict_sim['fwhm']))" ] }, { @@ -467,7 +3749,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "metadata": { "collapsed": true }, @@ -486,9 +3768,809 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 23, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. 
\" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Plot pattern of activity for each condition\n", "plt.figure()\n", @@ -552,7 +5446,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "metadata": { "collapsed": true }, @@ -574,7 +5468,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 31, "metadata": { "collapsed": true }, @@ -605,7 +5499,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 32, "metadata": { "collapsed": true }, @@ -639,7 +5533,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 33, "metadata": { "collapsed": true }, @@ -665,7 +5559,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 34, "metadata": { "collapsed": true }, @@ -680,9 +5574,809 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "metadata": {}, - "outputs": [], + 
"outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " // select the cell after this one\n", + " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", + " IPython.notebook.select(index + 1);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the 
cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 41, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Plot the pattern of activity for our signal voxels at each timepoint\n", "plt.figure()\n", @@ -856,9 +7350,809 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 42, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " this.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "(-0.5, 63.5, 63.5, -0.5)" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Plot a slice through the noise brain\n", "plt.figure()\n", @@ -1112,809 +223,9 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Plot the different noise types\n", "plt.figure()\n", @@ -2813,809 +324,9 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "(-0.5, 63.5, 63.5, -0.5)" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "plt.figure()\n", "plt.imshow(signal_volume[:, :, 21], cmap=plt.cm.gray)\n", @@ -4589,7 +481,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": { "collapsed": true }, @@ -4606,820 +498,8 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "print(pattern_A.mean())\n", - "print(pattern_A.mean())\n" - ] - }, - { - "cell_type": "code", - "execution_count": 25, "metadata": {}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " 
return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 35, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Prepare the data to be plotted\n", "response = signal_func[0:100,0] * 2\n", @@ -6405,7 +685,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, "metadata": { "collapsed": true }, @@ -6424,7 +704,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "metadata": { "collapsed": true }, @@ -6450,7 +730,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": null, "metadata": { "collapsed": true }, @@ -6472,7 +752,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": null, "metadata": { "collapsed": true }, @@ -6501,7 +781,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": null, "metadata": { "collapsed": true }, @@ -6524,809 +804,9 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": null, "metadata": 
{}, - "outputs": [ - { - "data": { - "application/javascript": [ - "/* Put everything inside the global mpl namespace */\n", - "window.mpl = {};\n", - "\n", - "\n", - "mpl.get_websocket_type = function() {\n", - " if (typeof(WebSocket) !== 'undefined') {\n", - " return WebSocket;\n", - " } else if (typeof(MozWebSocket) !== 'undefined') {\n", - " return MozWebSocket;\n", - " } else {\n", - " alert('Your browser does not have WebSocket support.' +\n", - " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", - " 'Firefox 4 and 5 are also supported but you ' +\n", - " 'have to enable WebSockets in about:config.');\n", - " };\n", - "}\n", - "\n", - "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", - " this.id = figure_id;\n", - "\n", - " this.ws = websocket;\n", - "\n", - " this.supports_binary = (this.ws.binaryType != undefined);\n", - "\n", - " if (!this.supports_binary) {\n", - " var warnings = document.getElementById(\"mpl-warnings\");\n", - " if (warnings) {\n", - " warnings.style.display = 'block';\n", - " warnings.textContent = (\n", - " \"This browser does not support binary websocket messages. \" +\n", - " \"Performance may be slow.\");\n", - " }\n", - " }\n", - "\n", - " this.imageObj = new Image();\n", - "\n", - " this.context = undefined;\n", - " this.message = undefined;\n", - " this.canvas = undefined;\n", - " this.rubberband_canvas = undefined;\n", - " this.rubberband_context = undefined;\n", - " this.format_dropdown = undefined;\n", - "\n", - " this.image_mode = 'full';\n", - "\n", - " this.root = $('
');\n", - " this._root_extra_style(this.root)\n", - " this.root.attr('style', 'display: inline-block');\n", - "\n", - " $(parent_element).append(this.root);\n", - "\n", - " this._init_header(this);\n", - " this._init_canvas(this);\n", - " this._init_toolbar(this);\n", - "\n", - " var fig = this;\n", - "\n", - " this.waiting = false;\n", - "\n", - " this.ws.onopen = function () {\n", - " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", - " fig.send_message(\"send_image_mode\", {});\n", - " if (mpl.ratio != 1) {\n", - " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", - " }\n", - " fig.send_message(\"refresh\", {});\n", - " }\n", - "\n", - " this.imageObj.onload = function() {\n", - " if (fig.image_mode == 'full') {\n", - " // Full images could contain transparency (where diff images\n", - " // almost always do), so we need to clear the canvas so that\n", - " // there is no ghosting.\n", - " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", - " }\n", - " fig.context.drawImage(fig.imageObj, 0, 0);\n", - " };\n", - "\n", - " this.imageObj.onunload = function() {\n", - " this.ws.close();\n", - " }\n", - "\n", - " this.ws.onmessage = this._make_on_message_function(this);\n", - "\n", - " this.ondownload = ondownload;\n", - "}\n", - "\n", - "mpl.figure.prototype._init_header = function() {\n", - " var titlebar = $(\n", - " '
');\n", - " var titletext = $(\n", - " '
');\n", - " titlebar.append(titletext)\n", - " this.root.append(titlebar);\n", - " this.header = titletext[0];\n", - "}\n", - "\n", - "\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._init_canvas = function() {\n", - " var fig = this;\n", - "\n", - " var canvas_div = $('
');\n", - "\n", - " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", - "\n", - " function canvas_keyboard_event(event) {\n", - " return fig.key_event(event, event['data']);\n", - " }\n", - "\n", - " canvas_div.keydown('key_press', canvas_keyboard_event);\n", - " canvas_div.keyup('key_release', canvas_keyboard_event);\n", - " this.canvas_div = canvas_div\n", - " this._canvas_extra_style(canvas_div)\n", - " this.root.append(canvas_div);\n", - "\n", - " var canvas = $('');\n", - " canvas.addClass('mpl-canvas');\n", - " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", - "\n", - " this.canvas = canvas[0];\n", - " this.context = canvas[0].getContext(\"2d\");\n", - "\n", - " var backingStore = this.context.backingStorePixelRatio ||\n", - "\tthis.context.webkitBackingStorePixelRatio ||\n", - "\tthis.context.mozBackingStorePixelRatio ||\n", - "\tthis.context.msBackingStorePixelRatio ||\n", - "\tthis.context.oBackingStorePixelRatio ||\n", - "\tthis.context.backingStorePixelRatio || 1;\n", - "\n", - " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", - "\n", - " var rubberband = $('');\n", - " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", - "\n", - " var pass_mouse_events = true;\n", - "\n", - " canvas_div.resizable({\n", - " start: function(event, ui) {\n", - " pass_mouse_events = false;\n", - " },\n", - " resize: function(event, ui) {\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " stop: function(event, ui) {\n", - " pass_mouse_events = true;\n", - " fig.request_resize(ui.size.width, ui.size.height);\n", - " },\n", - " });\n", - "\n", - " function mouse_event_fn(event) {\n", - " if (pass_mouse_events)\n", - " return fig.mouse_event(event, event['data']);\n", - " }\n", - "\n", - " rubberband.mousedown('button_press', mouse_event_fn);\n", - " rubberband.mouseup('button_release', mouse_event_fn);\n", - " // Throttle sequential mouse events to 1 every 
20ms.\n", - " rubberband.mousemove('motion_notify', mouse_event_fn);\n", - "\n", - " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", - " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", - "\n", - " canvas_div.on(\"wheel\", function (event) {\n", - " event = event.originalEvent;\n", - " event['data'] = 'scroll'\n", - " if (event.deltaY < 0) {\n", - " event.step = 1;\n", - " } else {\n", - " event.step = -1;\n", - " }\n", - " mouse_event_fn(event);\n", - " });\n", - "\n", - " canvas_div.append(canvas);\n", - " canvas_div.append(rubberband);\n", - "\n", - " this.rubberband = rubberband;\n", - " this.rubberband_canvas = rubberband[0];\n", - " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", - " this.rubberband_context.strokeStyle = \"#000000\";\n", - "\n", - " this._resize_canvas = function(width, height) {\n", - " // Keep the size of the canvas, canvas container, and rubber band\n", - " // canvas in synch.\n", - " canvas_div.css('width', width)\n", - " canvas_div.css('height', height)\n", - "\n", - " canvas.attr('width', width * mpl.ratio);\n", - " canvas.attr('height', height * mpl.ratio);\n", - " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", - "\n", - " rubberband.attr('width', width);\n", - " rubberband.attr('height', height);\n", - " }\n", - "\n", - " // Set the figure to an initial 600x600px, this will subsequently be updated\n", - " // upon first draw.\n", - " this._resize_canvas(600, 600);\n", - "\n", - " // Disable right mouse context menu.\n", - " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", - " return false;\n", - " });\n", - "\n", - " function set_focus () {\n", - " canvas.focus();\n", - " canvas_div.focus();\n", - " }\n", - "\n", - " window.setTimeout(set_focus, 100);\n", - "}\n", - "\n", - "mpl.figure.prototype._init_toolbar = function() {\n", - " var fig = this;\n", - "\n", - " var nav_element = $('
')\n", - " nav_element.attr('style', 'width: 100%');\n", - " this.root.append(nav_element);\n", - "\n", - " // Define a callback function for later on.\n", - " function toolbar_event(event) {\n", - " return fig.toolbar_button_onclick(event['data']);\n", - " }\n", - " function toolbar_mouse_event(event) {\n", - " return fig.toolbar_button_onmouseover(event['data']);\n", - " }\n", - "\n", - " for(var toolbar_ind in mpl.toolbar_items) {\n", - " var name = mpl.toolbar_items[toolbar_ind][0];\n", - " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", - " var image = mpl.toolbar_items[toolbar_ind][2];\n", - " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", - "\n", - " if (!name) {\n", - " // put a spacer in here.\n", - " continue;\n", - " }\n", - " var button = $('');\n", - " button.click(method_name, toolbar_event);\n", - " button.mouseover(tooltip, toolbar_mouse_event);\n", - " nav_element.append(button);\n", - " }\n", - "\n", - " // Add the status bar.\n", - " var status_bar = $('');\n", - " nav_element.append(status_bar);\n", - " this.message = status_bar[0];\n", - "\n", - " // Add the close button to the window.\n", - " var buttongrp = $('
');\n", - " var button = $('');\n", - " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", - " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", - " buttongrp.append(button);\n", - " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", - " titlebar.prepend(buttongrp);\n", - "}\n", - "\n", - "mpl.figure.prototype._root_extra_style = function(el){\n", - " var fig = this\n", - " el.on(\"remove\", function(){\n", - "\tfig.close_ws(fig, {});\n", - " });\n", - "}\n", - "\n", - "mpl.figure.prototype._canvas_extra_style = function(el){\n", - " // this is important to make the div 'focusable\n", - " el.attr('tabindex', 0)\n", - " // reach out to IPython and tell the keyboard manager to turn it's self\n", - " // off when our div gets focus\n", - "\n", - " // location in version 3\n", - " if (IPython.notebook.keyboard_manager) {\n", - " IPython.notebook.keyboard_manager.register_events(el);\n", - " }\n", - " else {\n", - " // location in version 2\n", - " IPython.keyboard_manager.register_events(el);\n", - " }\n", - "\n", - "}\n", - "\n", - "mpl.figure.prototype._key_event_extra = function(event, name) {\n", - " var manager = IPython.notebook.keyboard_manager;\n", - " if (!manager)\n", - " manager = IPython.keyboard_manager;\n", - "\n", - " // Check for shift+enter\n", - " if (event.shiftKey && event.which == 13) {\n", - " this.canvas_div.blur();\n", - " // select the cell after this one\n", - " var index = IPython.notebook.find_cell_index(this.cell_info[0]);\n", - " IPython.notebook.select(index + 1);\n", - " }\n", - "}\n", - "\n", - "mpl.figure.prototype.handle_save = function(fig, msg) {\n", - " fig.ondownload(fig, null);\n", - "}\n", - "\n", - "\n", - "mpl.find_output_cell = function(html_output) {\n", - " // Return the cell and output element which can be found *uniquely* in the notebook.\n", - " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", - " // IPython event is triggered only after the 
cells have been serialised, which for\n", - " // our purposes (turning an active figure into a static one), is too late.\n", - " var cells = IPython.notebook.get_cells();\n", - " var ncells = cells.length;\n", - " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", - " data = data.data;\n", - " }\n", - " if (data['text/html'] == html_output) {\n", - " return [cell, data, j];\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "// Register the function which deals with the matplotlib target/channel.\n", - "// The kernel may be null if the page has been refreshed.\n", - "if (IPython.notebook.kernel != null) {\n", - " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", - "}\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 42, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Calculate the distance matrix between trial types\n", "distance_matrix = sp_distance.squareform(sp_distance.pdist(np.vstack([trials_A.transpose(), trials_B.transpose()])))\n", @@ -8178,18 +858,9 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Mean difference between condition A and B: -4.83\n", - "p value: 0.026\n" - ] - } - ], + "outputs": [], "source": [ "mean_difference = (np.mean(trials_A,0) - np.mean(trials_B,0))\n", "ttest = stats.ttest_1samp(mean_difference, 0)\n", @@ -8208,17 +879,9 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Classification accuracy between condition A and B: 
1.000\n" - ] - } - ], + "outputs": [], "source": [ "# Get the inputs to the SVM\n", "input_mat = np.vstack([trials_A.transpose(), trials_B.transpose()])\n", From 529b91754522233cc7ac82032598eced39563082 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 13 Mar 2018 22:28:06 -0400 Subject: [PATCH 14/51] Updated how fitting is done and how machine noise is calculated --- brainiak/utils/fmrisim.py | 119 +++++++++++++++++++++++++++++++++++++- 1 file changed, 118 insertions(+), 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index d0ea6a5eb..7a3d7c0ae 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1405,6 +1405,14 @@ def _generate_noise_system(dimensions_tr, What are the dimensions of the volume you wish to insert noise into. This can be a volume of any size + spatial_sd : float + What is the standard deviation in space of the noise volume to be + generated + + temporal_sd : float + What is the standard deviation in time of the noise volume to be + generated + noise_type : str String specifying the noise type. If you aren't specifying the noise template then Rician is the appropriate model of noise. However, @@ -2266,6 +2274,37 @@ def _fit_snr_sfnr(noise, Parameters ---------- + noise : multidimensional array, float + Initial estimate of the noise + + noise_temporal : multidimensional array, float + The temporal noise that was generated by _generate_temporal_noise + + tr_duration : float + What is the duration, in seconds, of each TR? + + template : 3d array, float + A continuous (0 -> 1) volume describing the likelihood a voxel is in + the brain. This can be used to contrast the brain and non brain. 
+ + mask : 3d array, binary + The mask of the brain volume, distinguishing brain from non-brain + + spatial_sd : float + What is the standard deviation in space of the noise volume to be + generated + + temporal_sd : float + What is the standard deviation in time of the noise volume to be + generated + + temporal_proportion, float + What is the proportion of the temporal variance (as specified by the + SFNR noise parameter) that is accounted for by the system noise. If + this number is high then all of the temporal variability is due to + system noise, if it is low then all of the temporal variability is + due to brain variability. + noise_dict : dict A dictionary specifying the types of noise in this experiment. The noise types interact in important ways. First, all noise types @@ -2275,6 +2314,22 @@ def _fit_snr_sfnr(noise, parameter that describes how much noise these components contribute to the brain. + fit_thresh : float + What proportion of the target parameter value is sufficient error to + warrant finishing fit search. + + fit_delta : float + How much are the parameters attenuated during the fitting process, + in terms of the proportion of difference between the target + parameter and the actual parameter + + iterations : list, int + The first element is how many steps of fitting the SFNR and SNR values + will be performed. Usually converges after < 5. The second element + is the number of iterations for the AR fitting. This is much more + time consuming (has to make a new timecourse on each iteration) so + be careful about setting this appropriately. 
+ Returns ------- @@ -2352,6 +2407,68 @@ def _fit_ar(noise, fit_delta, iterations, ): + """ + Fit the noise model to match the SNR and SFNR of the data + + Parameters + ---------- + + noise : multidimensional array, float + Initial estimate of the noise + + mask : 3d array, binary + The mask of the brain volume, distinguishing brain from non-brain + + template : 3d array, float + A continuous (0 -> 1) volume describing the likelihood a voxel is in + the brain. This can be used to contrast the brain and non brain. + + stimfunction_tr : Iterable, list + When do the stimuli events occur. Each element is a TR + + tr_duration : float + What is the duration, in seconds, of each TR? + + spatial_sd : float + What is the standard deviation in space of the noise volume to be + generated + + temporal_sd : float + What is the standard deviation in time of the noise volume to be + generated + + noise_dict : dict + A dictionary specifying the types of noise in this experiment. The + noise types interact in important ways. First, all noise types + ending with sigma (e.g. motion sigma) are mixed together in + _generate_temporal_noise. These values describe the proportion of + mixing of these elements. However critically, SFNR is the + parameter that describes how much noise these components contribute + to the brain. + + fit_thresh : float + What proportion of the target parameter value is sufficient error to + warrant finishing fit search. + + fit_delta : float + How much are the parameters attenuated during the fitting process, + in terms of the proportion of difference between the target + parameter and the actual parameter + + iterations : list, int + The first element is how many steps of fitting the SFNR and SNR values + will be performed. Usually converges after < 5. The second element + is the number of iterations for the AR fitting. This is much more + time consuming (has to make a new timecourse on each iteration) so + be careful about setting this appropriately. 
+ + Returns + ------- + + noise : multidimensional array, float + Generates the noise volume given these parameters + + """ # Pull out the dim_tr = noise.shape @@ -2417,7 +2534,7 @@ def generate_noise(dimensions, mask=None, noise_dict=None, temporal_proportion=0.5, - iterations=[5, 20], + iterations=[20, 20], fit_thresh=0.05, fit_delta=0.5, ): From 4f82e481be330965a2aa2010153676c123c7bbe7 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 27 Mar 2018 21:32:37 -0400 Subject: [PATCH 15/51] Updated the export epoch file to deal with error in epoch number generation. Moreover, changed calc snr --- brainiak/utils/fmrisim.py | 83 ++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 32 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 7a3d7c0ae..ec455bf70 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -648,46 +648,56 @@ def export_epoch_file(stimfunction, # What is the time course for the participant (binarized) stimfunction_ppt = np.abs(stimfunction[participant_counter]) > 0 - # Cycle through conditions + # Down sample the stim function + stride = tr_duration * temporal_resolution + stimfunction_downsampled = stimfunction_ppt[::int(stride), :] + + # Calculates the number of event onsets. This uses changes in value to reflect + # different epochs. This might be false in some cases (the + # weight is non-uniform over an epoch or there is no + # break between identically weighted epochs). + epochs = 0 # Preset conditions = stimfunction_ppt.shape[1] for condition_counter in range(conditions): - # Down sample the stim function - stride = tr_duration * temporal_resolution - stimfunction_temp = stimfunction_ppt[:, condition_counter] - stimfunction_temp = stimfunction_temp[::int(stride)] + weight_change = ( + np.diff(stimfunction_downsampled[:, condition_counter], 1, 0) != 0) - if condition_counter == 0: - # Calculates the number of event onsets (max of all - # conditions). 
This uses changes in value to reflect - # different epochs. This might be false in some cases (the - # weight is supposed to unfold over an epoch or there is no - # break between identically weighted epochs). In such cases - # this will not work - weight_change = (np.diff(stimfunction_temp, 1, 0) != 0) - epochs = int(np.max(np.sum(weight_change, 0)) / 2) + # If the first or last events are 'on' then make these + # represent a epoch change + if stimfunction_downsampled[0, condition_counter] == 1: + weight_change[0] = True + if stimfunction_downsampled[-1, condition_counter] == 1: + weight_change[-1] = True - # Get other information - trs = stimfunction_temp.shape[0] + epochs += int(np.max(np.sum(weight_change, 0)) / 2) - # Make a timing file for this participant - epoch_file[participant_counter] = np.zeros((conditions, - epochs, trs)) + # Get other information + trs = stimfunction_downsampled.shape[0] - epoch_counter = 0 - tr_counter = 0 - while tr_counter < stimfunction_temp.shape[0]: + # Make a timing file for this participant + epoch_file[participant_counter] = np.zeros((conditions, + epochs, trs)) - # Is it an event? - if stimfunction_temp[tr_counter] == 1: + # Cycle through conditions + epoch_counter = 0 # Reset and count across conditions + tr_counter = 0 + while tr_counter < stimfunction_downsampled.shape[0]: + + for condition_counter in range(conditions): + # Is it an event? 
+ if tr_counter < stimfunction_downsampled.shape[0] and \ + stimfunction_downsampled[ + tr_counter, condition_counter] == 1: # Add a one for this TR epoch_file[participant_counter][condition_counter, epoch_counter, tr_counter] = 1 # Find the next non event value - end_idx = np.where(stimfunction_temp[tr_counter:] == 0)[ + end_idx = np.where(stimfunction_downsampled[tr_counter:, + condition_counter] == 0)[ 0][0] tr_idxs = list(range(tr_counter, tr_counter + end_idx)) @@ -705,6 +715,10 @@ def export_epoch_file(stimfunction, # Increment the counter tr_counter += 1 + + # Convert to boolean + epoch_file = epoch_file.astype('bool') + # Save the file np.save(filename, epoch_file) @@ -1163,9 +1177,9 @@ def _calc_snr(volume, """ - # If no TR is specified then take the middle one + # If no TR is specified then take all of them if tr is None: - tr = int(np.ceil(volume.shape[3] / 2)) + tr = list(range(volume.shape[3])) # Dilate the mask in order to ensure that non-brain voxels are far from # the brain @@ -1175,14 +1189,19 @@ def _calc_snr(volume, else: mask_dilated = mask - # Make a matrix of brain and non_brain voxels, selecting the timepoint + # Make a matrix of brain and non_brain voxels, selecting the timepoint/s brain_voxels = volume[mask > 0][:, tr] nonbrain_voxels = (volume[:, :, :, tr]).astype('float64') - # Do you want to remove the average of the periphery (removes - # structure, leaving only variability) - if template_baseline is not None: - nonbrain_voxels -= template_baseline + # If you have multiple TRs + if len(brain_voxels.shape) > 1: + brain_voxels = np.mean(brain_voxels, 1) + nonbrain_voxels = np.mean(nonbrain_voxels, 3) + + # # Do you want to remove the average of the periphery (removes + # # structure, leaving only variability) + # if template_baseline is not None: + # nonbrain_voxels -= template_baseline nonbrain_voxels = nonbrain_voxels[mask_dilated == 0] From 0899cbdda704d3847f5d85c63fbab29f7e05b28a Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 
15 May 2018 19:22:09 -0400 Subject: [PATCH 16/51] Improved export_epoch, added more logging info, zscoring AR, reduced AR coefs to 1, added some warnings/tests of input data quality, split fitting up into 3 functions --- brainiak/utils/fmrisim.py | 292 +++++++++++++++++++++++++++++++------- 1 file changed, 239 insertions(+), 53 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index ec455bf70..2c2f4293c 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -492,7 +492,7 @@ def generate_stimfunction(onsets, 'Consider increasing' \ ' the temporal ' \ 'resolution.' - logging.warning(warning) + logger.warning(warning) onsets.append(float(onset)) event_durations.append(float(duration)) @@ -508,7 +508,7 @@ def generate_stimfunction(onsets, # Check files if np.max(onsets) > total_time: - print('Onsets outside of range of total time. Aborting') + logger.info('Onsets outside of range of total time. Aborting') exit() # Generate the time course as empty, each element is a millisecond by @@ -630,7 +630,7 @@ def export_epoch_file(stimfunction, label them as different epochs filename : str - The name of the three column text file to be output + The name of the epoch file to be output tr_duration : float How long is each TR in seconds @@ -715,9 +715,8 @@ def export_epoch_file(stimfunction, # Increment the counter tr_counter += 1 - - # Convert to boolean - epoch_file = epoch_file.astype('bool') + # Convert to boolean + epoch_file[participant_counter] = epoch_file[participant_counter].astype('bool') # Save the file np.save(filename, epoch_file) @@ -1217,7 +1216,7 @@ def _calc_snr(volume, def _calc_ARMA_noise(volume, mask, - auto_reg_order=2, + auto_reg_order=1, ma_order=1, sample_num=100, ): @@ -1332,6 +1331,10 @@ def calc_noise(volume, """ + # Check the inputs + if template.max() > 1.1: + raise ValueError('Template out of range') + # Create the mask if not supplied and set the mask size if mask is None: mask = 
np.ones(volume.shape[:-1]) @@ -1734,6 +1737,10 @@ def _generate_noise_temporal_autoregression(timepoints, noise_autoregression[:, :, :, tr_counter] = AR_vol + noise + # Z score the data so that all of the standard deviations of the voxels + # are one (but the ARMA coefs are unchanged) + noise_autoregression = stats.zscore(noise_autoregression, 3) + return noise_autoregression @@ -2257,9 +2264,9 @@ def _noise_dict_update(noise_dict): if 'auto_reg_sigma' not in noise_dict: noise_dict['auto_reg_sigma'] = 1 if 'auto_reg_rho' not in noise_dict: - noise_dict['auto_reg_rho'] = [1.0, -0.5] + noise_dict['auto_reg_rho'] = [0.5] if 'ma_rho' not in noise_dict: - noise_dict['ma_rho'] = [0] + noise_dict['ma_rho'] = [0.0] if 'physiological_sigma' not in noise_dict: noise_dict['physiological_sigma'] = 0 if 'sfnr' not in noise_dict: @@ -2275,20 +2282,151 @@ def _noise_dict_update(noise_dict): return noise_dict -def _fit_snr_sfnr(noise, - noise_temporal, - mask, - template, - spatial_sd, - temporal_sd, - temporal_proportion, - noise_dict, - fit_thresh, - fit_delta, - iterations, - ): + +def _fit_snr(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd, + noise_dict, + fit_thresh, + fit_delta, + iterations, + ): """ - Fit the noise model to match the SNR and SFNR of the data + Fit the noise model to match the SNR of the data + + Parameters + ---------- + + noise : multidimensional array, float + Initial estimate of the noise + + noise_temporal : multidimensional array, float + The temporal noise that was generated by _generate_temporal_noise + + tr_duration : float + What is the duration, in seconds, of each TR? + + template : 3d array, float + A continuous (0 -> 1) volume describing the likelihood a voxel is in + the brain. This can be used to contrast the brain and non brain. 
+ + mask : 3d array, binary + The mask of the brain volume, distinguishing brain from non-brain + + spatial_sd : float + What is the standard deviation in space of the noise volume to be + generated + + temporal_sd : float + What is the standard deviation in time of the noise volume to be + generated + + noise_dict : dict + A dictionary specifying the types of noise in this experiment. The + noise types interact in important ways. First, all noise types + ending with sigma (e.g. motion sigma) are mixed together in + _generate_temporal_noise. These values describe the proportion of + mixing of these elements. However critically, SFNR is the + parameter that describes how much noise these components contribute + to the brain. + + fit_thresh : float + What proportion of the target parameter value is sufficient error to + warrant finishing fit search. + + fit_delta : float + How much are the parameters attenuated during the fitting process, + in terms of the proportion of difference between the target + parameter and the actual parameter + + iterations : int + The first element is how many steps of fitting the SFNR and SNR values + will be performed. Usually converges after < 5. The second element + is the number of iterations for the AR fitting. This is much more + time consuming (has to make a new timecourse on each iteration) so + be careful about setting this appropriately. 
+ + Returns + ------- + + noise : multidimensional array, float + Generates the noise volume given these parameters + + """ + + # Pull out information that is needed + dim_tr = noise.shape + base = template * noise_dict['max_activity'] + base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1) + mean_signal = (base[mask > 0]).mean() + target_snr = noise_dict['snr'] + + # Iterate through different parameters to fit SNR and SFNR + spat_sd_orig = np.copy(spatial_sd) + iteration = 0 + for iteration in list(range(iterations)): + + # Calculate the new metrics + new_snr = _calc_snr(noise, mask, template_baseline=template) + + # Calculate the difference between the real and simulated data + diff_snr = abs(new_snr - target_snr) / target_snr + + # If the AR is sufficiently close then break the loop + if diff_snr < fit_thresh: + logger.info('Terminated SNR fit after ' + str( + iteration) + ' iterations.') + break + + # Convert the SFNR and SNR + spat_sd_new = mean_signal / new_snr + + # Update the variable + spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta) + + # Prevent these going out of range + if spatial_sd < 0 or np.isnan(spatial_sd): + spatial_sd = 10e-3 + + # Set up the machine noise + noise_system = _generate_noise_system(dimensions_tr=dim_tr, + spatial_sd=spatial_sd, + temporal_sd=temporal_sd, + ) + + # Sum up the noise of the brain + noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system + + # Reject negative values (only happens outside of the brain) + noise[noise < 0] = 0 + + # Failed to converge + if iterations == 0: + logger.info('No fitting iterations were run') + elif iteration == iterations: + logger.info('SNR failed to converge.') + + # Return the updated noise + return noise, spatial_sd + + +def _fit_sfnr(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd, + temporal_proportion, + noise_dict, + fit_thresh, + fit_delta, + iterations, + ): + """ + Fit the noise model to match the SFNR of the data Parameters ---------- 
@@ -2342,7 +2480,7 @@ def _fit_snr_sfnr(noise, in terms of the proportion of difference between the target parameter and the actual parameter - iterations : list, int + iterations : int The first element is how many steps of fitting the SFNR and SNR values will be performed. Usually converges after < 5. The second element is the number of iterations for the AR fitting. This is much more @@ -2363,41 +2501,34 @@ def _fit_snr_sfnr(noise, base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1) mean_signal = (base[mask > 0]).mean() target_sfnr = noise_dict['sfnr'] - target_snr = noise_dict['snr'] # Iterate through different parameters to fit SNR and SFNR - spat_sd_orig = np.copy(spatial_sd) temp_sd_orig = np.copy(temporal_sd) + iteration = 0 for iteration in list(range(iterations)): # Calculate the new metrics new_sfnr = _calc_sfnr(noise, mask) - new_snr = _calc_snr(noise, mask, template_baseline=template) # Calculate the difference between the real and simulated data diff_sfnr = abs(new_sfnr - target_sfnr) / target_sfnr - diff_snr = abs(new_snr - target_snr) / target_snr # If the AR is sufficiently close then break the loop - if diff_sfnr < fit_thresh and diff_snr < fit_thresh: - print('Terminated SNR and SFNR fit after ' + str( + if diff_sfnr < fit_thresh: + logger.info('Terminated SFNR fit after ' + str( iteration) + ' iterations.') break # Convert the SFNR and SNR temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * temporal_proportion) - spat_sd_new = mean_signal / new_snr # Update the variables temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) - spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta) # Prevent these going out of range if temporal_sd < 0 or np.isnan(temporal_sd): temporal_sd = 10e-3 - if spatial_sd < 0 or np.isnan(spatial_sd): - spatial_sd = 10e-3 # Set up the machine noise noise_system = _generate_noise_system(dimensions_tr=dim_tr, @@ -2411,8 +2542,15 @@ def _fit_snr_sfnr(noise, # Reject negative values (only happens outside of the 
brain) noise[noise < 0] = 0 + # Failed to converge + if iterations == 0: + logger.info('No fitting iterations were run') + elif iteration == iterations: + logger.info('SFNR failed to converge.') + # Return the updated noise - return noise, spatial_sd, temporal_sd + return noise, temporal_sd + def _fit_ar(noise, mask, @@ -2475,11 +2613,11 @@ def _fit_ar(noise, parameter and the actual parameter iterations : list, int - The first element is how many steps of fitting the SFNR and SNR values - will be performed. Usually converges after < 5. The second element - is the number of iterations for the AR fitting. This is much more - time consuming (has to make a new timecourse on each iteration) so - be careful about setting this appropriately. + The first element is how many steps of fitting the SFNR and SNR + values will be performed. Usually converges after < 5. The + second element is the number of iterations for the AR fitting. + This is much more time consuming (has to make a new timecourse + on each iteration) so be careful about setting this appropriately. 
Returns ------- @@ -2494,8 +2632,13 @@ def _fit_ar(noise, dim = dim_tr[0:3] base = template * noise_dict['max_activity'] base = base.reshape(dim[0], dim[1], dim[2], 1) + mean_signal = (base[mask > 0]).mean() + target_sfnr = noise_dict['sfnr'] # Iterate through different MA parameters to fit AR + temp_sd_orig = np.copy(temporal_sd) + temporal_proportion = 0.5 + iteration = 0 for iteration in list(range(iterations)): # If there are iterations left to perform then recalculate the @@ -2512,9 +2655,15 @@ def _fit_ar(noise, # Calculate the difference in the first AR component ar_0_diff = abs(new_ar[0] - target_ar[0]) / target_ar[0] + # Calculate the new metrics + new_sfnr = _calc_sfnr(noise, mask) + + # Calculate the difference between the real and simulated data + diff_sfnr = abs(new_sfnr - target_sfnr) / target_sfnr + # If the AR is sufficiently close then break the loop - if ar_0_diff < fit_thresh: - print('Terminated AR fit after ' + str(iteration) + + if ar_0_diff < fit_thresh and diff_sfnr < fit_thresh: + logger.info('Terminated AR fit after ' + str(iteration) + ' iterations.') break else: @@ -2522,6 +2671,17 @@ def _fit_ar(noise, noise_dict['ma_rho'] = [noise_dict['ma_rho'][0] - (ar_0_diff * fit_delta)] + # Convert the SFNR and SNR + temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * + temporal_proportion) + + # Update the variables + temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) + + # Prevent these going out of range + if temporal_sd < 0 or np.isnan(temporal_sd): + temporal_sd = 10e-3 + # Generate the noise. 
The appropriate noise_temporal = _generate_noise_temporal(stimfunction_tr, tr_duration, @@ -2543,6 +2703,12 @@ def _fit_ar(noise, # Reject negative values (only happens outside of the brain) noise[noise < 0] = 0 + # Failed to converge + if iterations == 0: + logger.info('No fitting iterations were run') + elif iteration == iterations: + logger.info('AR failed to converge.') + # Return the updated noise return noise @@ -2619,6 +2785,13 @@ def generate_noise(dimensions, """ + # Check the input data + if template.max() > 1.1: + raise ValueError('Template out of range') + + if abs(noise_dict['auto_reg_rho'][0]) - abs(noise_dict['ma_rho'][0]) < 0.1: + logger.warning('ARMA coefs are close, may have troule fitting') + # Change to be an empty dictionary if it is None if noise_dict is None: noise_dict = {} @@ -2681,18 +2854,31 @@ def generate_noise(dimensions, noise[noise < 0] = 0 # Fit the SNR and SFNR - noise, spatial_sd, temporal_sd_system = _fit_snr_sfnr(noise, - noise_temporal, - mask, - template, - spatial_sd, - temporal_sd_system, - temporal_proportion, - noise_dict, - fit_thresh, - fit_delta, - iterations[0], - ) + noise, spatial_sd = _fit_snr(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd_system, + noise_dict, + fit_thresh, + fit_delta, + iterations[0], + ) + + # Fit the SFNR + noise, temporal_sd = _fit_sfnr(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd_system, + temporal_proportion, + noise_dict, + fit_thresh, + fit_delta, + iterations[0], + ) # Fit the AR noise = _fit_ar(noise, From 8b071f322143a2cf6f3003ba60c7401de8130ae7 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 15 May 2018 20:39:10 -0400 Subject: [PATCH 17/51] Fixed reference to noise dict before it wwas preset --- brainiak/utils/fmrisim.py | 6 +++--- tests/utils/test_fmrisim.py | 4 +--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 2c2f4293c..c5d9c4ccf 100644 --- 
a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2789,9 +2789,6 @@ def generate_noise(dimensions, if template.max() > 1.1: raise ValueError('Template out of range') - if abs(noise_dict['auto_reg_rho'][0]) - abs(noise_dict['ma_rho'][0]) < 0.1: - logger.warning('ARMA coefs are close, may have troule fitting') - # Change to be an empty dictionary if it is None if noise_dict is None: noise_dict = {} @@ -2799,6 +2796,9 @@ def generate_noise(dimensions, # Take in the noise dictionary and add any missing information noise_dict = _noise_dict_update(noise_dict) + if abs(noise_dict['auto_reg_rho'][0]) - abs(noise_dict['ma_rho'][0]) < 0.1: + logger.warning('ARMA coefs are close, may have troule fitting') + # What are the dimensions of the volume, including time dimensions_tr = (dimensions[0], dimensions[1], diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 81cebeb50..280776af6 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -320,6 +320,4 @@ def test_calc_noise(): sfnr_diff = abs(nd_orig['sfnr'] - nd_new['sfnr']) assert sfnr_diff < 10, 'sfnr calculated incorrectly' ar1_diff = abs(nd_orig['auto_reg_rho'][0] - nd_new['auto_reg_rho'][0]) - assert ar1_diff < 1, 'AR1 calculated incorrectly' - ar2_diff = abs(nd_orig['auto_reg_rho'][1] - nd_new['auto_reg_rho'][1]) - assert ar2_diff < 1, 'AR2 calculated incorrectly' \ No newline at end of file + assert ar1_diff < 1, 'AR1 calculated incorrectly' \ No newline at end of file From 2da8948e72b0841a667616549e36827b8524f843 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 24 May 2018 16:06:54 -0400 Subject: [PATCH 18/51] Increased the precision of the ar estimation and added a toggle in the noise dict for matching participants --- brainiak/utils/fmrisim.py | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index c5d9c4ccf..0e6b2f5bb 100644 --- 
a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1218,7 +1218,7 @@ def _calc_ARMA_noise(volume, mask, auto_reg_order=1, ma_order=1, - sample_num=100, + sample_num=1000, ): """ Calculate the the ARMA noise of a volume This calculates the autoregressive and moving average noise of the volume @@ -1653,7 +1653,8 @@ def _generate_noise_temporal_autoregression(timepoints, _generate_temporal_noise. The sigma values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute - to the brain. + to the brain. If you set the noise dict to matched then it will fit + the parameters to match the participant as best as possible. dimensions : 3 length array, int What is the shape of the volume to be generated @@ -2005,7 +2006,8 @@ def _generate_noise_temporal(stimfunction_tr, _generate_temporal_noise. The sigma values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute - to the brain. + to the brain. If you set the noise dict to matched then it will fit + the parameters to match the participant as best as possible. Returns ---------- @@ -2244,7 +2246,8 @@ def _noise_dict_update(noise_dict): _generate_temporal_noise. These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute - to the brain. + to the brain. If you set the noise dict to matched then it will fit + the parameters to match the participant as best as possible. Returns ------- @@ -2279,6 +2282,8 @@ def _noise_dict_update(noise_dict): noise_dict['voxel_size'] = [1.0, 1.0, 1.0] if 'fwhm' not in noise_dict: noise_dict['fwhm'] = 4 + if 'matched' not in noise_dict: + noise_dict['matched'] = 0 return noise_dict @@ -2331,7 +2336,8 @@ def _fit_snr(noise, _generate_temporal_noise. 
These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute - to the brain. + to the brain. If you set the noise dict to matched then it will + fit the parameters to match the participant as best as possible. fit_thresh : float What proportion of the target parameter value is sufficient error to @@ -2469,7 +2475,8 @@ def _fit_sfnr(noise, _generate_temporal_noise. These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute - to the brain. + to the brain. If you set the noise dict to matched then it will + fit the parameters to match the participant as best as possible. fit_thresh : float What proportion of the target parameter value is sufficient error to @@ -2601,7 +2608,8 @@ def _fit_ar(noise, _generate_temporal_noise. These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute - to the brain. + to the brain. If you set the noise dict to matched then it will + fit the parameters to match the participant as best as possible. fit_thresh : float What proportion of the target parameter value is sufficient error to @@ -2719,7 +2727,7 @@ def generate_noise(dimensions, mask=None, noise_dict=None, temporal_proportion=0.5, - iterations=[20, 20], + iterations=None, fit_thresh=0.05, fit_delta=0.5, ): @@ -2796,6 +2804,15 @@ def generate_noise(dimensions, # Take in the noise dictionary and add any missing information noise_dict = _noise_dict_update(noise_dict) + # How many iterations will you perform? 
If unspecified it will set + # values based on whether you are trying to match noise specifically to + # this participant or just get in the ball park + if iterations is None: + if noise_dict['matched'] == 1: + iterations = [20, 20] + else: + iterations = [0, 0] + if abs(noise_dict['auto_reg_rho'][0]) - abs(noise_dict['ma_rho'][0]) < 0.1: logger.warning('ARMA coefs are close, may have troule fitting') @@ -2932,7 +2949,8 @@ def compute_signal_change(signal_function, _generate_temporal_noise. The sigma values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute - to the brain. + to the brain. If you set the noise dict to matched then it will + fit the parameters to match the participant as best as possible. magnitude : list of floats This specifies the size, in terms of the metric choosen below, From dc9cbe90b393e30515d615d047ef0eac828482e2 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 24 May 2018 16:12:39 -0400 Subject: [PATCH 19/51] Increased the precision of the ar estimation and added a toggle in the noise dict for matching participants --- brainiak/utils/fmrisim.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 0e6b2f5bb..de9bedaf3 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1239,7 +1239,11 @@ def _calc_ARMA_noise(volume, What order of the autoregression do you want to estimate sample_num : int - How many voxels would you like to sample to calculate the AR values + How many voxels would you like to sample to calculate the AR values. + The AR distribution of real data is approximately exponential maxing + at 1. From analyses across a number of participants, to get less + than 1% standard deviation of error from the true mean it is + necessary to sample at least 1000 voxels. 
Returns ------- From f1f0bbe40afeda2c41742edbb88cfde3bee0ab92 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Sun, 27 May 2018 15:36:26 -0400 Subject: [PATCH 20/51] Fit AR directly --- brainiak/utils/fmrisim.py | 57 ++++++++++++++------------------------- 1 file changed, 20 insertions(+), 37 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index de9bedaf3..316298f32 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -89,6 +89,7 @@ from scipy import stats from scipy import signal import scipy.ndimage as ndimage +import copy __all__ = [ "generate_signal", @@ -1218,7 +1219,7 @@ def _calc_ARMA_noise(volume, mask, auto_reg_order=1, ma_order=1, - sample_num=1000, + sample_num=100, ): """ Calculate the the ARMA noise of a volume This calculates the autoregressive and moving average noise of the volume @@ -1702,7 +1703,6 @@ def _generate_noise_temporal_autoregression(timepoints, # Create a brain shaped volume with appropriate smoothing properties noise = _generate_noise_spatial(dimensions=dimensions, - template=template, mask=mask, fwhm=noise_dict['fwhm'], ) @@ -1797,7 +1797,6 @@ def _generate_noise_temporal_phys(timepoints, def _generate_noise_spatial(dimensions, - template=None, mask=None, fwhm=4.0, ): @@ -1958,10 +1957,10 @@ def _Pk2(idxs, sigma): noise_spatial = np.fft.ifftn(noise * amplitude) # Mask or not, then z score - if mask is not None and template is not None: + if mask is not None: # Mask the output - noise_spatial = noise_spatial.real * template + noise_spatial = noise_spatial.real * mask # Z score the specific to the brain noise_spatial[mask > 0] = stats.zscore(noise_spatial[mask > 0]) @@ -2053,7 +2052,6 @@ def _generate_noise_temporal(stimfunction_tr, # Create a brain shaped volume with similar smoothing properties volume = _generate_noise_spatial(dimensions=dimensions, - template=template, mask=mask, fwhm=noise_dict['fwhm'], ) @@ -2085,7 +2083,6 @@ def _generate_noise_temporal(stimfunction_tr, # 
Create a brain shaped volume with similar smoothing properties volume = _generate_noise_spatial(dimensions=dimensions, - template=template, mask=mask, fwhm=noise_dict['fwhm'], ) @@ -2576,7 +2573,7 @@ def _fit_ar(noise, iterations, ): """ - Fit the noise model to match the SNR and SFNR of the data + Fit the noise model to match the AR of the data by changing the MA Parameters ---------- @@ -2644,18 +2641,18 @@ def _fit_ar(noise, dim = dim_tr[0:3] base = template * noise_dict['max_activity'] base = base.reshape(dim[0], dim[1], dim[2], 1) - mean_signal = (base[mask > 0]).mean() - target_sfnr = noise_dict['sfnr'] + + # Make a copy of the dictionary so it can be modified + new_nd = copy.deepcopy(noise_dict) + + # What AR do you want? + target_ar = noise_dict['auto_reg_rho'][0] # Iterate through different MA parameters to fit AR - temp_sd_orig = np.copy(temporal_sd) - temporal_proportion = 0.5 - iteration = 0 for iteration in list(range(iterations)): # If there are iterations left to perform then recalculate the # metrics and try again - target_ar = noise_dict['auto_reg_rho'] # Calculate the new metrics new_ar, _ = _calc_ARMA_noise(noise, @@ -2665,34 +2662,20 @@ def _fit_ar(noise, ) # Calculate the difference in the first AR component - ar_0_diff = abs(new_ar[0] - target_ar[0]) / target_ar[0] - - # Calculate the new metrics - new_sfnr = _calc_sfnr(noise, mask) - - # Calculate the difference between the real and simulated data - diff_sfnr = abs(new_sfnr - target_sfnr) / target_sfnr + ar_0_diff = new_ar[0] - target_ar # If the AR is sufficiently close then break the loop - if ar_0_diff < fit_thresh and diff_sfnr < fit_thresh: + if (abs(ar_0_diff) / target_ar) < fit_thresh: logger.info('Terminated AR fit after ' + str(iteration) + - ' iterations.') + ' iterations. 
Fit value: ' + str(new_nd['auto_reg_rho'][0])) break else: - # Otherwise update the ma coefficient - noise_dict['ma_rho'] = [noise_dict['ma_rho'][0] - (ar_0_diff * - fit_delta)] - - # Convert the SFNR and SNR - temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * - temporal_proportion) - - # Update the variables - temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) + # Otherwise update the AR coefficient according to what is needed + new_nd['auto_reg_rho'][0] -= (ar_0_diff * fit_delta) - # Prevent these going out of range - if temporal_sd < 0 or np.isnan(temporal_sd): - temporal_sd = 10e-3 + # Don't let the AR coefficient exceed 1 + if new_nd['auto_reg_rho'][0] >= 1: + new_nd['auto_reg_rho'][0] = 0.99 # Generate the noise. The appropriate noise_temporal = _generate_noise_temporal(stimfunction_tr, @@ -2700,7 +2683,7 @@ def _fit_ar(noise, dim, template, mask, - noise_dict, + new_nd, ) # Set up the machine noise From a543aac17b8333762ea1fca31579448cc2c613b6 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Mon, 28 May 2018 01:15:50 -0400 Subject: [PATCH 21/51] Consolidate spatial and temporal noise fitting --- brainiak/utils/fmrisim.py | 311 ++++++++++++-------------------------- 1 file changed, 96 insertions(+), 215 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 316298f32..694d4a62e 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2289,17 +2289,17 @@ def _noise_dict_update(noise_dict): return noise_dict -def _fit_snr(noise, - noise_temporal, - mask, - template, - spatial_sd, - temporal_sd, - noise_dict, - fit_thresh, - fit_delta, - iterations, - ): +def _fit_spatial(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd, + noise_dict, + fit_thresh, + fit_delta, + iterations, + ): """ Fit the noise model to match the SNR of the data @@ -2420,160 +2420,21 @@ def _fit_snr(noise, return noise, spatial_sd -def _fit_sfnr(noise, - noise_temporal, - mask, - template, - spatial_sd, - 
temporal_sd, - temporal_proportion, - noise_dict, - fit_thresh, - fit_delta, - iterations, - ): - """ - Fit the noise model to match the SFNR of the data - - Parameters - ---------- - - noise : multidimensional array, float - Initial estimate of the noise - - noise_temporal : multidimensional array, float - The temporal noise that was generated by _generate_temporal_noise - - tr_duration : float - What is the duration, in seconds, of each TR? - - template : 3d array, float - A continuous (0 -> 1) volume describing the likelihood a voxel is in - the brain. This can be used to contrast the brain and non brain. - - mask : 3d array, binary - The mask of the brain volume, distinguishing brain from non-brain - - spatial_sd : float - What is the standard deviation in space of the noise volume to be - generated - - temporal_sd : float - What is the standard deviation in time of the noise volume to be - generated - - temporal_proportion, float - What is the proportion of the temporal variance (as specified by the - SFNR noise parameter) that is accounted for by the system noise. If - this number is high then all of the temporal variability is due to - system noise, if it is low then all of the temporal variability is - due to brain variability. - - noise_dict : dict - A dictionary specifying the types of noise in this experiment. The - noise types interact in important ways. First, all noise types - ending with sigma (e.g. motion sigma) are mixed together in - _generate_temporal_noise. These values describe the proportion of - mixing of these elements. However critically, SFNR is the - parameter that describes how much noise these components contribute - to the brain. If you set the noise dict to matched then it will - fit the parameters to match the participant as best as possible. - - fit_thresh : float - What proportion of the target parameter value is sufficient error to - warrant finishing fit search. 
- - fit_delta : float - How much are the parameters attenuated during the fitting process, - in terms of the proportion of difference between the target - parameter and the actual parameter - - iterations : int - The first element is how many steps of fitting the SFNR and SNR values - will be performed. Usually converges after < 5. The second element - is the number of iterations for the AR fitting. This is much more - time consuming (has to make a new timecourse on each iteration) so - be careful about setting this appropriately. - - Returns - ------- - - noise : multidimensional array, float - Generates the noise volume given these parameters - - """ - - # Pull out information that is needed - dim_tr = noise.shape - base = template * noise_dict['max_activity'] - base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1) - mean_signal = (base[mask > 0]).mean() - target_sfnr = noise_dict['sfnr'] - - # Iterate through different parameters to fit SNR and SFNR - temp_sd_orig = np.copy(temporal_sd) - iteration = 0 - for iteration in list(range(iterations)): - - # Calculate the new metrics - new_sfnr = _calc_sfnr(noise, mask) - - # Calculate the difference between the real and simulated data - diff_sfnr = abs(new_sfnr - target_sfnr) / target_sfnr - - # If the AR is sufficiently close then break the loop - if diff_sfnr < fit_thresh: - logger.info('Terminated SFNR fit after ' + str( - iteration) + ' iterations.') - break - - # Convert the SFNR and SNR - temp_sd_new = np.sqrt(((mean_signal / new_sfnr) ** 2) * - temporal_proportion) - - # Update the variables - temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) - - # Prevent these going out of range - if temporal_sd < 0 or np.isnan(temporal_sd): - temporal_sd = 10e-3 - - # Set up the machine noise - noise_system = _generate_noise_system(dimensions_tr=dim_tr, - spatial_sd=spatial_sd, - temporal_sd=temporal_sd, - ) - - # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system - - 
# Reject negative values (only happens outside of the brain) - noise[noise < 0] = 0 - - # Failed to converge - if iterations == 0: - logger.info('No fitting iterations were run') - elif iteration == iterations: - logger.info('SFNR failed to converge.') - - # Return the updated noise - return noise, temporal_sd - - -def _fit_ar(noise, - mask, - template, - stimfunction_tr, - tr_duration, - spatial_sd, - temporal_sd, - noise_dict, - fit_thresh, - fit_delta, - iterations, - ): +def _fit_temporal(noise, + mask, + template, + stimfunction_tr, + tr_duration, + spatial_sd, + temporal_proportion, + temporal_sd, + noise_dict, + fit_thresh, + fit_delta, + iterations, + ): """ - Fit the noise model to match the AR of the data by changing the MA + Fit the noise model to match the SFNR and AR of the data Parameters ---------- @@ -2598,6 +2459,13 @@ def _fit_ar(noise, What is the standard deviation in space of the noise volume to be generated + temporal_proportion, float + What is the proportion of the temporal variance (as specified by the + SFNR noise parameter) that is accounted for by the system noise. If + this number is high then all of the temporal variability is due to + system noise, if it is low then all of the temporal variability is + due to brain variability. + temporal_sd : float What is the standard deviation in time of the noise volume to be generated @@ -2641,10 +2509,17 @@ def _fit_ar(noise, dim = dim_tr[0:3] base = template * noise_dict['max_activity'] base = base.reshape(dim[0], dim[1], dim[2], 1) + mean_signal = (base[mask > 0]).mean() + + # Iterate through different parameters to fit SNR and SFNR + temp_sd_orig = np.copy(temporal_sd) # Make a copy of the dictionary so it can be modified new_nd = copy.deepcopy(noise_dict) + # What SFNR do you want + target_sfnr = noise_dict['sfnr'] + # What AR do you want? 
target_ar = noise_dict['auto_reg_rho'][0] @@ -2654,28 +2529,47 @@ def _fit_ar(noise, # If there are iterations left to perform then recalculate the # metrics and try again - # Calculate the new metrics + # Calculate the new SFNR + new_sfnr = _calc_sfnr(noise, mask) + + # Calculate the AR new_ar, _ = _calc_ARMA_noise(noise, mask, len(noise_dict['auto_reg_rho']), len(noise_dict['ma_rho']), ) + # Calculate the difference between the real and simulated data + sfnr_diff = abs(new_sfnr - target_sfnr) / target_sfnr + # Calculate the difference in the first AR component - ar_0_diff = new_ar[0] - target_ar + ar_diff = new_ar[0] - target_ar - # If the AR is sufficiently close then break the loop - if (abs(ar_0_diff) / target_ar) < fit_thresh: + # If the SFNR and AR is sufficiently close then break the loop + if (abs(ar_diff) / target_ar) < fit_thresh and sfnr_diff < fit_thresh: logger.info('Terminated AR fit after ' + str(iteration) + - ' iterations. Fit value: ' + str(new_nd['auto_reg_rho'][0])) + ' iterations.') break - else: - # Otherwise update the AR coefficient according to what is needed - new_nd['auto_reg_rho'][0] -= (ar_0_diff * fit_delta) - # Don't let the AR coefficient exceed 1 - if new_nd['auto_reg_rho'][0] >= 1: - new_nd['auto_reg_rho'][0] = 0.99 + ## Otherwise update the noise metrics + + # Get the new temporal noise value + temp_sd_new = mean_signal / new_sfnr + temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) + + # Set the new system noise + temp_sd_system_new = np.sqrt((temporal_sd ** 2) * temporal_proportion) + + # Prevent these going out of range + if temporal_sd < 0 or np.isnan(temporal_sd): + temporal_sd = 10e-3 + + # Get the new AR value + new_nd['auto_reg_rho'][0] -= (ar_diff * fit_delta) + + # Don't let the AR coefficient exceed 1 + if new_nd['auto_reg_rho'][0] >= 1: + new_nd['auto_reg_rho'][0] = 0.99 # Generate the noise. 
The appropriate noise_temporal = _generate_noise_temporal(stimfunction_tr, @@ -2689,7 +2583,7 @@ def _fit_ar(noise, # Set up the machine noise noise_system = _generate_noise_system(dimensions_tr=dim_tr, spatial_sd=spatial_sd, - temporal_sd=temporal_sd, + temporal_sd=temp_sd_system_new, ) # Sum up the noise of the brain @@ -2857,46 +2751,33 @@ def generate_noise(dimensions, # Reject negative values (only happens outside of the brain) noise[noise < 0] = 0 - # Fit the SNR and SFNR - noise, spatial_sd = _fit_snr(noise, - noise_temporal, - mask, - template, - spatial_sd, - temporal_sd_system, - noise_dict, - fit_thresh, - fit_delta, - iterations[0], - ) + # Fit the SNR + noise, spatial_sd = _fit_spatial(noise, + noise_temporal, + mask, + template, + spatial_sd, + temporal_sd_system, + noise_dict, + fit_thresh, + fit_delta, + iterations[0], + ) - # Fit the SFNR - noise, temporal_sd = _fit_sfnr(noise, - noise_temporal, - mask, - template, - spatial_sd, - temporal_sd_system, - temporal_proportion, - noise_dict, - fit_thresh, - fit_delta, - iterations[0], - ) - - # Fit the AR - noise = _fit_ar(noise, - mask, - template, - stimfunction_tr, - tr_duration, - spatial_sd, - temporal_sd, - noise_dict, - fit_thresh, - fit_delta, - iterations[1], - ) + # Fit the SFNR and AR noise + noise = _fit_temporal(noise, + mask, + template, + stimfunction_tr, + tr_duration, + spatial_sd, + temporal_proportion, + temporal_sd, + noise_dict, + fit_thresh, + fit_delta, + iterations[1], + ) # Return the noise return noise From 2ae082e5df6924ea8d2596668baaa5341e2ea73f Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Mon, 28 May 2018 11:02:24 -0400 Subject: [PATCH 22/51] Adjusted noise terms --- brainiak/utils/fmrisim.py | 101 +++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 50 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 694d4a62e..cd1497843 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2735,7 +2735,8 @@ 
def generate_noise(dimensions, temporal_sd_system = np.sqrt((temporal_sd ** 2) * temporal_proportion) # What is the standard deviation of the background activity - spatial_sd = mean_signal / noise_dict['snr'] + spat_sd = mean_signal / noise_dict['snr'] + spatial_sd = np.sqrt((spat_sd ** 2) * (1 - temporal_proportion)) # Set up the machine noise @@ -2745,7 +2746,7 @@ def generate_noise(dimensions, ) # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd_system)) + \ + noise = base + (noise_temporal * (1 - temporal_sd)) + \ noise_system # Reject negative values (only happens outside of the brain) @@ -2794,61 +2795,61 @@ def compute_signal_change(signal_function, timecourse. Importantly, all values within the signal_function are scaled to have a min of -1 or max of 1 - Parameters - ---------- + Parameters + ---------- - signal_function : timepoint by voxel array - The signal time course to be altered. This can have - multiple time courses specified as different columns in this - array. Conceivably you could use the output of - generate_stimfunction as the input but the temporal variance - will be incorrect + signal_function : timepoint by voxel array + The signal time course to be altered. This can have + multiple time courses specified as different columns in this + array. Conceivably you could use the output of + generate_stimfunction as the input but the temporal variance + will be incorrect - noise_function : timepoint by voxel numpy array - The time course of noise (a voxel created from generate_noise) - for each voxel specified in signal_function. This is necessary - for computing the mean evoked activity and the noise variability + noise_function : timepoint by voxel numpy array + The time course of noise (a voxel created from generate_noise) + for each voxel specified in signal_function. 
This is necessary + for computing the mean evoked activity and the noise variability - noise_dict : dict - A dictionary specifying the types of noise in this experiment. The - noise types interact in important ways. First, all noise types - ending with sigma (e.g. motion sigma) are mixed together in - _generate_temporal_noise. The sigma values describe the proportion of - mixing of these elements. However critically, SFNR is the - parameter that describes how much noise these components contribute - to the brain. If you set the noise dict to matched then it will - fit the parameters to match the participant as best as possible. - - magnitude : list of floats - This specifies the size, in terms of the metric choosen below, - of the signal being generated. This can be a single number, - and thus apply to all signal timecourses, or it can be array and - thus different for each voxel. - - method : str - Select the procedure used to calculate the signal magnitude, - some of which are based on the definitions outlined in Welvaert & - Rosseel (2013): - - 'SFNR': Change proportional to the temporal variability, - as represented by the (desired) SFNR - - 'CNR_Amp/Noise-SD': Signal magnitude relative to the temporal - noise - - 'CNR_Amp2/Noise-Var_dB': Same as above but converted to decibels - - 'CNR_Signal-SD/Noise-SD': Standard deviation in signal - relative to standard deviation in noise - - 'CNR_Signal-Var/Noise-Var_dB': Same as above but converted to - decibels - - 'PSC': Calculate the percent signal change based on the - average activity of the noise (mean / 100 * magnitude) + noise_dict : dict + A dictionary specifying the types of noise in this experiment. The + noise types interact in important ways. First, all noise types + ending with sigma (e.g. motion sigma) are mixed together in + _generate_temporal_noise. The sigma values describe the proportion of + mixing of these elements. 
However critically, SFNR is the + parameter that describes how much noise these components contribute + to the brain. If you set the noise dict to matched then it will + fit the parameters to match the participant as best as possible. + + magnitude : list of floats + This specifies the size, in terms of the metric choosen below, + of the signal being generated. This can be a single number, + and thus apply to all signal timecourses, or it can be array and + thus different for each voxel. + + method : str + Select the procedure used to calculate the signal magnitude, + some of which are based on the definitions outlined in Welvaert & + Rosseel (2013): + - 'SFNR': Change proportional to the temporal variability, + as represented by the (desired) SFNR + - 'CNR_Amp/Noise-SD': Signal magnitude relative to the temporal + noise + - 'CNR_Amp2/Noise-Var_dB': Same as above but converted to decibels + - 'CNR_Signal-SD/Noise-SD': Standard deviation in signal + relative to standard deviation in noise + - 'CNR_Signal-Var/Noise-Var_dB': Same as above but converted to + decibels + - 'PSC': Calculate the percent signal change based on the + average activity of the noise (mean / 100 * magnitude) - Returns - ---------- - signal_function_scaled : 4d numpy array - The new signal volume with the appropriately set signal change + Returns + ---------- + signal_function_scaled : 4d numpy array + The new signal volume with the appropriately set signal change - """ + """ # If you have only one magnitude value, duplicate the magnitude for each # timecourse you have From 9bbd190e0c46f7d09949c16ca516ca15b36c8f67 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Mon, 28 May 2018 18:44:40 -0400 Subject: [PATCH 23/51] Minor wording edits --- brainiak/utils/fmrisim.py | 17 +- .../utils/fmrisim_multivariate_example.ipynb | 8307 ++++++++++++++++- 2 files changed, 8234 insertions(+), 90 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index cd1497843..7878b2496 100644 
--- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1411,19 +1411,14 @@ def _generate_noise_system(dimensions_tr, scanner. Generates a distribution with a SD of 1. If you look at the distribution of non-brain voxel intensity in modern scans you will see it is rician. However, depending on how you have calculated the SNR and - whether the template is being used you want to use this function - differently: the voxels in the non-brain are stable over time and + whether the template is being used you will want to use this function + differently: the voxels outside the brain tend to be stable over time and usually reflect structure in the MR signal (e.g. the baseline MR of the head coil or skull). Hence the template captures this rician noise structure. If you are adding the machine noise to the template, as is done in generate_noise, then you are likely doubling up - on the addition of machine noise. To correct for this you can instead - calculate the SNR with the baseline removed (give template_baseline the - template, which is the default) and then create machine noise as a - gaussian around this baseline. The residual noise is approximately - gaussian in regions far from the brain but becomes much more kurtotic - towards the brain centre, which is captured when you combine the - baseline with the gaussian. + on the addition of machine noise. In such cases, machine noise seems to + be better modelled by gaussian noise on top of this rician structure. 
Parameters ---------- @@ -2276,7 +2271,7 @@ def _noise_dict_update(noise_dict): if 'sfnr' not in noise_dict: noise_dict['sfnr'] = 90 if 'snr' not in noise_dict: - noise_dict['snr'] = 25 + noise_dict['snr'] = 50 if 'max_activity' not in noise_dict: noise_dict['max_activity'] = 1000 if 'voxel_size' not in noise_dict: @@ -2695,7 +2690,7 @@ def generate_noise(dimensions, iterations = [0, 0] if abs(noise_dict['auto_reg_rho'][0]) - abs(noise_dict['ma_rho'][0]) < 0.1: - logger.warning('ARMA coefs are close, may have troule fitting') + logger.warning('ARMA coefs are close, may have trouble fitting') # What are the dimensions of the volume, including time dimensions_tr = (dimensions[0], diff --git a/examples/utils/fmrisim_multivariate_example.ipynb b/examples/utils/fmrisim_multivariate_example.ipynb index a16b873f0..176ad9923 100644 --- a/examples/utils/fmrisim_multivariate_example.ipynb +++ b/examples/utils/fmrisim_multivariate_example.ipynb @@ -24,11 +24,11 @@ "# fMRI Simulator example script for multivariate analyses\n", "\n", "Example script to demonstrate fmrisim functionality. This generates\n", - "data for a two condition, event related design in which each condition\n", + "data for a two condition, event-related design in which each condition\n", "evokes different activity within the same voxels. It then runs simple \n", "univariate and multivariate analyses on the data\n", "\n", - "Authors: Cameron Ellis (Yale) 2017\n" + "Authors: Cameron Ellis (Yale) 2018\n" ] }, { @@ -49,9 +49,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. 
Please use the pandas.tseries module instead.\n", + " from pandas.core import datetools\n" + ] + } + ], "source": [ "%matplotlib notebook\n", "\n", @@ -79,7 +88,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": { "collapsed": true }, @@ -101,9 +110,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(64, 64, 27, 294)\n" + ] + } + ], "source": [ "dim = volume.shape # What is the size of the volume\n", "dimsize = nii.header.get_zooms() # Get voxel dimensions from the nifti header\n", @@ -124,7 +141,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "collapsed": true }, @@ -145,17 +162,46 @@ "\n", "Now the disclaimers: the values here are only an estimate and will depend on noise properties combining in the ways assumed. In addition, because of the non-linearity and stochasticity of this simulation, this estimation is not fully invertible: if you generate a dataset with a set of noise parameters it will have similar but not the same noise parameters as a result. Moreover, complex interactions between brain regions that likely better describe brain noise are not modelled here: this toolbox pays no attention to regions of the brain or their interactions. Finally, for best results use raw fMRI because if the data has been preprocessed then assumptions this algorithm makes are likely to be erroneous. For instance, if the brain has been masked then this will eliminate variance in non-brain voxels which will mean that calculations of noise dependent on those voxels as a reference will fail.\n", "\n", - "This toolbox separates noise in two: spatial noise and temporal noise. To estimate spatial noise both the smoothness and the amount of non-brain noise of the data must be quantified. 
For smoothness, the Full Width Half Max (FWHM) of the volume is averaged for the X, Y and Z dimension and then averaged across a sample of time points. To calculate the Signal to Noise Ratio (SNR) the mean activity in brain voxels for the middle time point is divided by the standard deviation in activity across non-brain voxels for that time point. For temporal noise an Auto-regressive and moving average (ARMA) process is estimated, along with the overall size of temporal variability. A sample of brain voxels is used to estimate the first two AR components and the first MA component of each voxel's activity over time using the statsmodels package. The Signal to Fluctuation Noise Ratio (SFNR) is calculated by dividing the average activity of voxels in the brain with that voxel’s noise (Friedman & Glover, 2006). That noise is calculated by taking the standard deviation of that voxel over time after it has been detrended with a second order polynomial. The SFNR then controls the amount of functional variability. Other types of noise can be generated, such as physiological noise, but are not estimated by this function.\n" + "To ameliorate some of these concerns, it is possible to fit the spatial and temporal noise properties of the data. This iterates over the noise generation process and tunes parameters in order to match those that are provided. This is time consuming (especially for fitting the temporal noise) but is helpful in matching the specified noise properties. \n", + "\n", + "This toolbox separates noise in two: spatial noise and temporal noise. To estimate spatial noise both the smoothness and the amount of non-brain noise of the data must be quantified. For smoothness, the Full Width Half Max (FWHM) of the volume is averaged for the X, Y and Z dimension and then averaged across a sample of time points. 
To calculate the Signal to Noise Ratio (SNR) the mean activity in brain voxels for the middle time point is divided by the standard deviation in activity across non-brain voxels for that time point. For temporal noise an auto-regressive and moving average (ARMA) process is estimated, along with the overall size of temporal variability. A sample of brain voxels is used to estimate the first AR component and the first MA component of each voxel's activity over time using the statsmodels package. The Signal to Fluctuation Noise Ratio (SFNR) is calculated by dividing the average activity of voxels in the brain with that voxel’s noise (Friedman & Glover, 2006). That noise is calculated by taking the standard deviation of that voxel over time after it has been detrended with a second order polynomial. The SFNR then controls the amount of functional variability. Other types of noise can be generated, such as physiological noise, but are not estimated by this function.\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. 
Check mle_retvals\n", + " \"Check mle_retvals\", ConvergenceWarning)\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/kalmanf/kalmanfilter.py:649: RuntimeWarning: divide by zero encountered in true_divide\n", + " R_mat, T_mat)\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:584: RuntimeWarning: overflow encountered in exp\n", + " newparams = ((1-np.exp(-params))/\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:585: RuntimeWarning: overflow encountered in exp\n", + " (1+np.exp(-params))).copy()\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:585: RuntimeWarning: invalid value encountered in true_divide\n", + " (1+np.exp(-params))).copy()\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:586: RuntimeWarning: overflow encountered in exp\n", + " tmp = ((1-np.exp(-params))/\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:587: RuntimeWarning: overflow encountered in exp\n", + " (1+np.exp(-params))).copy()\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/tsa/tsatools.py:587: RuntimeWarning: invalid value encountered in true_divide\n", + " (1+np.exp(-params))).copy()\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:473: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n", + " 'available', HessianInversionWarning)\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n", + " \"Check mle_retvals\", ConvergenceWarning)\n" + ] + } + ], "source": [ - "# Calculate the noise parameters from the data\n", - "noise_dict = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]]}\n", + "# Calculate the noise parameters from the data. 
Set it up to be matched.\n", + "noise_dict = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]], 'matched': 1}\n", "noise_dict = fmrisim.calc_noise(volume=volume,\n", " mask=mask,\n", " template=template,\n", @@ -165,9 +211,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Noise parameters of the data were estimated as follows:\n", + "SNR: 23.1756482\n", + "SFNR: 70.7171164885\n", + "FWHM: 5.65994469633\n" + ] + } + ], "source": [ "print('Noise parameters of the data were estimated as follows:')\n", "print('SNR: ' + str(noise_dict['snr']))\n", @@ -179,15 +236,26 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### **2. Generate signal**\n", + "### **2. Generate noise**\n", "fmrisim can generate realistic fMRI noise when supplied with the appropriate inputs. A single function receives these inputs and deals with generating the noise. The necessary inputs are described below; however, the steps performed by this function are also described in detail for clarity." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/scipy/stats/stats.py:2246: RuntimeWarning: invalid value encountered in true_divide\n", + " np.expand_dims(sstd, axis=axis))\n", + "/Users/cellis/anaconda/lib/python3.6/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. 
Check mle_retvals\n", + " \"Check mle_retvals\", ConvergenceWarning)\n" + ] + } + ], "source": [ "# Calculate the noise given the parameters\n", "noise = fmrisim.generate_noise(dimensions=dim[0:3],\n", @@ -196,15 +264,817 @@ " mask=mask,\n", " template=template,\n", " noise_dict=noise_dict,\n", - " iterations=[50,0],\n", " )" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. 
\" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " fig.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " event.shiftKey = false;\n", + " // Send a \"J\" for go to next cell\n", + " event.which = 74;\n", + " event.keyCode = 74;\n", + " manager.command_mode();\n", + " manager.handle_keydown(event);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // 
IPython event is triggered only after the cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(-0.5, 63.5, 63.5, -0.5)" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Plot spatial noise\n", "low_spatial = fmrisim._generate_noise_spatial(dim[0:3],\n", @@ -249,7 +1922,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": { "collapsed": true }, @@ -282,9 +1955,812 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' 
+\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " fig.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " event.shiftKey = false;\n", + " // Send a \"J\" for go to next cell\n", + " event.which = 74;\n", + " event.keyCode = 74;\n", + " manager.command_mode();\n", + " manager.handle_keydown(event);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // 
IPython event is triggered only after the cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "Text(0.5,0,'Activity difference')" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Dilate the mask so as to only take voxels far from the brain (performed in calc_noise)\n", "mask_dilated = ndimage.morphology.binary_dilation(mask, iterations=10)\n", @@ -340,6 +3624,7 @@ "plt.figure()\n", "plt.subplot(1, 3, 1)\n", "plt.hist(system_all[:,0].flatten(),100)\n", + "plt.title('Non-brain distribution')\n", "plt.xlabel('Activity')\n", "plt.ylabel('Frequency')\n", "\n", @@ -350,14 +3635,18 @@ "temporal = system_all[idxs[:100], :100]\n", "plt.subplot(1, 3, 2)\n", "plt.imshow(temporal)\n", - "plt.axis('off')\n", - "plt.title('Temporal')\n", + "plt.xticks([], [])\n", + "plt.yticks([], [])\n", + "plt.ylabel('voxel ID')\n", + "plt.xlabel('time')\n", + "plt.title('Voxel x time')\n", "\n", "# Plot the difference\n", "ax=plt.subplot(1, 3, 3)\n", 
"plt.hist(system_baseline[:,0].flatten(),100)\n", "ax.yaxis.tick_right()\n", "ax.yaxis.set_label_position(\"right\")\n", + "plt.title('Demeaned non-brain distribution')\n", "plt.xlabel('Activity difference')" ] }, @@ -378,21 +3667,21 @@ "\n", "The generate_noise function does its best to estimate the appropriate noise parameters using assumptions about noise sources; however, because of the complexity of these different noise types, it is often wrong. To compensate, fitting is performed in which parameters involved in the noise generation process are changed and the noise metrics are recalculated to see whether those changes helped the fit. Due to their importance, the parameters that can be fit are SNR, SFNR and AR.\n", "\n", - "The fitting of SNR/SFNR involves reweighting spatial and temporal metrics of noise. This analysis is relatively quick because this reweighting does not require that any timecourses are recreated, only that they are reweighted. At least 10 iterations are recommended because the initial guesses tend to underestimate SFNR and SNR (although the size of this error depends on the data). In the case of fitting the AR, the MA rho is adjusted until the AR is appropriate and in doing so the timecourse needs to be recreated for each iteration. The default number of AR iterations is 0 because by default these values are in an appropriate range. However, at least 10 iterations are needed if you wish to match your output data to the exact input data.\n", + "The fitting of SNR/SFNR involves reweighting spatial and temporal metrics of noise. This analysis is relatively quick because this reweighting does not require that any timecourses are recreated, only that they are reweighted. At least 10 iterations are recommended because the initial guesses tend to underestimate SFNR and SNR (although the size of this error depends on the data). 
In the case of fitting the AR, the MA rho is adjusted until the AR is appropriate and in doing so the timecourse needs to be recreated for each iteration. In the noise_dict, one of the keys is 'matched' which is a binary value determining whether any fitting will be done\n", "\n", - "In terms of timing, for a medium size dataset (64x64x27x294 voxels) it takes approximately 23s to generate the data with 0 iterations on a Mac 2014 laptop. For every iteration of fitting the SNR/SFNR, it takes an additional 3s and for every additional iteration of fitting AR it takes an additional 10s (these combine linearly)." + "In terms of timing, for a medium size dataset (64x64x27x300 voxels) it takes approximately 2 minutes to generate the data when fitting on a Mac 2014 laptop. " ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Compute the noise parameters for the simulated noise\n", - "noise_dict_sim = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]]}\n", + "noise_dict_sim = {'voxel_size': [dimsize[0], dimsize[1], dimsize[2]], 'matched': 1}\n", "noise_dict_sim = fmrisim.calc_noise(volume=noise,\n", " mask=mask,\n", " template=template,\n", @@ -402,23 +3691,27 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('SNR: %0.2f vs %0.2f' % (noise_dict['snr'], noise_dict_sim['snr']))" - ] - }, - { - "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Compare noise parameters for the real and simulated noise:\n", + "SNR: 23.18 vs 23.26\n", + "SFNR: 70.72 vs 68.53\n", + "FWHM: 5.66 vs 5.76\n", + "AR: 0.82 vs 0.84\n" + ] + } + ], "source": [ "print('Compare noise parameters for the real and simulated noise:')\n", "print('SNR: %0.2f vs %0.2f' % (noise_dict['snr'], noise_dict_sim['snr']))\n", 
"print('SFNR: %0.2f vs %0.2f' % (noise_dict['sfnr'], noise_dict_sim['sfnr']))\n", - "print('FWHM: %0.2f vs %0.2f' % (noise_dict['fwhm'], noise_dict_sim['fwhm']))" + "print('FWHM: %0.2f vs %0.2f' % (noise_dict['fwhm'], noise_dict_sim['fwhm']))\n", + "print('AR: %0.2f vs %0.2f' % (noise_dict['auto_reg_rho'][0], noise_dict_sim['auto_reg_rho'][0]))" ] }, { @@ -427,7 +3720,7 @@ "source": [ "### **3. Generate signal**\n", "\n", - "fmrisim can be used to generate signal in a number of different ways depending on the type of effect being simulated. Several tools are supplied to help with different types of signal that may be required; however, custom scripts may be necessary for unique effects. Below an experiment will be simulated in which two conditions, A and B, evoke different patterns of activity in the same set of voxels in the brain. This pattern does not manifest as a univariate change in voxel activity across voxels but instead each condition evokes a consistent pattern across voxels. These conditions are randomly intermixed trial by trial. This code could be easily changed to instead compare univariate changes evoked by stimuli in different brain regions. " + "fmrisim can be used to generate signal in a number of different ways depending on the type of effect being simulated. Several tools are supplied to help with different types of signal that may be required; however, custom scripts may be necessary for unique effects. Below an experiment will be simulated in which two conditions, A and B, evoke different patterns of activity in the same set of voxels in the brain. This pattern does not manifest as a uniform change in activity across voxels but instead each condition evokes a consistent pattern across voxels. These conditions are randomly intermixed trial by trial. This code could be easily changed to instead compare univariate changes evoked by stimuli in different brain regions. 
" ] }, { @@ -441,7 +3734,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "metadata": { "collapsed": true }, @@ -460,9 +3753,812 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 31, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " fig.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " event.shiftKey = false;\n", + " // Send a \"J\" for go to next cell\n", + " event.which = 74;\n", + " event.keyCode = 74;\n", + " manager.command_mode();\n", + " manager.handle_keydown(event);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // 
IPython event is triggered only after the cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cellis/anaconda/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:107: MatplotlibDeprecationWarning: Passing one of 'on', 'true', 'off', 'false' as a boolean is deprecated; use an actual boolean (True/False) instead.\n", + " warnings.warn(message, mplDeprecation, stacklevel=1)\n" + ] + }, + { + "data": { + "text/plain": [ + "Text(0.5,0,'Condition B')" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Plot pattern of activity for each condition\n", "plt.figure()\n", @@ -526,7 +5433,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 34, "metadata": { "collapsed": true }, @@ -548,7 +5455,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "metadata": { "collapsed": true }, @@ -579,7 +5486,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 36, 
"metadata": { "collapsed": true }, @@ -608,12 +5515,12 @@ "source": [ "*3.5 Estimate the voxel weight for each event*\n", "\n", - "According to the logic of this example, each signal voxel will respond a different amount for condition A and B, but this amount will also differ across voxels. To simulate this we multiply a voxel’s response to each condition by the time course of events and then combine these conditions time courses to make a single time course. This time course describes each voxel’s response to stimuli over time." + "According to the logic of this example, each voxel carrying signal will respond a different amount for condition A and B. To simulate this we multiply a voxel’s response to each condition by the time course of events and then combine these to make a single time course. This time course describes each voxel’s response to signal over time." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 37, "metadata": { "collapsed": true }, @@ -628,18 +5535,834 @@ "stimfunc_weighted = stimfunc_weighted.transpose()" ] }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. 
' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. \" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " fig.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " event.shiftKey = false;\n", + " // Send a \"J\" for go to next cell\n", + " event.which = 74;\n", + " event.keyCode = 74;\n", + " manager.command_mode();\n", + " manager.handle_keydown(event);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // 
IPython event is triggered only after the cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "Text(0.5,0,'nth TR')" + ] + }, + "execution_count": 45, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Prepare the data to be plotted\n", "response = signal_func[0:100,0] * 2\n", @@ -680,12 +7206,12 @@ "source": [ "*3.7 Establish signal magnitude*\n", "\n", - "When specifying the signal we must determine the amount of activity change each voxel undergoes. fmrisim contains a tool to allow you to choose between a variety of different metrics that you could use to scale the signal. For instance, we can calculate percent signal change (referred to as PSC) by taking the average activity of voxels in an ROI of the noise volume and multiplying it by a proportion to signal the percentage change that this signal maximally evokes. This metric doesn't take account of the variance in the noise but other metrics available do. 
One metric that does take account of variance, and is used below, is the signal amplitude divided by the temporal variability. The choices that are available for computing the signal scale are based on Welvaert and Rosseel (2013)." + "When specifying the signal we must determine the amount of activity change each voxel undergoes. fmrisim contains a tool to allow you to choose between a variety of different metrics that you could use to scale the signal. For instance, we can calculate percent signal change (referred to as PSC) by taking the average activity of a voxel in the noise volume and multiplying the maximal activation of the signal by a percentage of this number. This metric doesn't take into account the variance in the noise but other metrics in this tool do. One metric that does take account of variance, and is used below, is the signal amplitude divided by the temporal variability. The choices that are available for computing the signal scale are based on Welvaert and Rosseel (2013)." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 46, "metadata": { "collapsed": true }, @@ -704,7 +7230,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 47, "metadata": { "collapsed": true }, @@ -730,7 +7256,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 48, "metadata": { "collapsed": true }, @@ -752,7 +7278,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 49, "metadata": { "collapsed": true }, @@ -767,7 +7293,7 @@ "source": [ "### **4. Analyse data**\n", "\n", - "Several tools are available for multivariate analysis in BrainIAK. These greatly speed up computation and are critical in some cases, such as a whole brain searchlight. However, for this example data we will only look at data in the ROI that we know contains signal." + "Several tools are available for multivariate analysis in BrainIAK. 
These greatly speed up computation and are critical in some cases, such as a whole brain searchlight. However, for this example data we will only look at data in the ROI that we know contains signal and so do not need these advanced tools optimized for whole-brain analyses." ] }, { @@ -781,7 +7307,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 50, "metadata": { "collapsed": true }, @@ -804,9 +7330,812 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 51, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support.' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. 
\" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " fig.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " event.shiftKey = false;\n", + " // Send a \"J\" for go to next cell\n", + " event.which = 74;\n", + " event.keyCode = 74;\n", + " manager.command_mode();\n", + " manager.handle_keydown(event);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // 
IPython event is triggered only after the cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "Text(0.5,1,'Low Dimensional Representation of conditions A and B')" + ] + }, + "execution_count": 52, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Calculate the distance matrix between trial types\n", "distance_matrix = sp_distance.squareform(sp_distance.pdist(np.vstack([trials_A.transpose(), trials_B.transpose()])))\n", @@ -858,9 +8990,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 53, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mean difference between condition A and B: 1.57\n", + "p value: 0.489\n" + ] + } + ], "source": [ "mean_difference = (np.mean(trials_A,0) - np.mean(trials_B,0))\n", "ttest = stats.ttest_1samp(mean_difference, 0)\n", @@ -879,9 +9020,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 54, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "Classification accuracy between condition A and B: 1.000\n" + ] + } + ], "source": [ "# Get the inputs to the SVM\n", "input_mat = np.vstack([trials_A.transpose(), trials_B.transpose()])\n", From 9a83a8193cb67e5c5bf46c26ad454bc3e55943ae Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Mon, 28 May 2018 18:45:00 -0400 Subject: [PATCH 24/51] Increased test coverage for fitting --- tests/utils/test_fmrisim.py | 46 +++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 280776af6..8d7830fe8 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -303,21 +303,59 @@ def test_calc_noise(): # Mask the volume to be the same shape as a brain mask, template = sim.mask_brain(dimensions_tr, mask_self=None) stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)] + + nd_orig['matched'] = 0 noise = sim.generate_noise(dimensions=dimensions_tr[0:3], stimfunction_tr=stimfunction_tr, tr_duration=tr_duration, template=template, mask=mask, noise_dict=nd_orig, - iterations=[10, 0], ) + # Check the spatial noise match + nd_orig['matched'] = 1 + noise_matched = sim.generate_noise(dimensions=dimensions_tr[0:3], + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict=nd_orig, + iterations=[50, 0] + ) + # Calculate the noise parameters from this newly generated volume nd_new = sim.calc_noise(noise, mask, template) + nd_matched = sim.calc_noise(noise_matched, mask, template) + + # Check the values are reasonable" + assert nd_new['snr'] > 0, 'snr out of range' + assert nd_new['sfnr'] > 0, 'sfnr out of range' + assert nd_new['auto_reg_rho'][0] > 0, 'ar out of range' + # Check that the fitting worked snr_diff = abs(nd_orig['snr'] - nd_new['snr']) - assert snr_diff < 10, 'snr calculated incorrectly' + snr_diff_match = abs(nd_orig['snr'] - 
nd_matched['snr']) + assert snr_diff > snr_diff_match, 'snr fit incorrectly' + + # Check the temporal noise match + nd_orig['matched'] = 1 + noise_matched = sim.generate_noise(dimensions=dimensions_tr[0:3], + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict=nd_orig, + iterations=[0, 50] + ) + + nd_matched = sim.calc_noise(noise_matched, mask, template) + sfnr_diff = abs(nd_orig['sfnr'] - nd_new['sfnr']) - assert sfnr_diff < 10, 'sfnr calculated incorrectly' + sfnr_diff_match = abs(nd_orig['sfnr'] - nd_matched['sfnr']) + assert sfnr_diff > sfnr_diff_match, 'sfnr fit incorrectly' + ar1_diff = abs(nd_orig['auto_reg_rho'][0] - nd_new['auto_reg_rho'][0]) - assert ar1_diff < 1, 'AR1 calculated incorrectly' \ No newline at end of file + ar1_diff_match = abs(nd_orig['auto_reg_rho'][0] - nd_matched[ + 'auto_reg_rho'][0]) + assert ar1_diff > ar1_diff_match, 'AR1 fit incorrectly' \ No newline at end of file From 20a9ac38916ae1e6e1807497da5c94c6eed57d7c Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Mon, 4 Jun 2018 22:38:49 -0400 Subject: [PATCH 25/51] Added a warning --- brainiak/utils/fmrisim.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 7878b2496..e0d6711c5 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -828,7 +828,7 @@ def convolve_hrf(stimfunction, Parameters ---------- - stimfunction : timepoint by timecourse array + stimfunction : timepoint by feature array What is the time course of events to be modelled in this experiment. This can specify one or more timecourses of events. The events can be weighted or binary @@ -856,6 +856,11 @@ def convolve_hrf(stimfunction, columns in this array. 
""" + + # Check if it is timepoint by feature + if stimfunction.shape[0] < stimfunction.shape[1]: + logger.warning('Stimfunction may be the wrong shape') + # How will stimfunction be resized stride = int(temporal_resolution * tr_duration) duration = int(stimfunction.shape[0] / stride) From 6ee9999b33ab597005bf0705321a1475bfbe7efa Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 5 Jun 2018 13:24:29 -0400 Subject: [PATCH 26/51] PEP8 --- brainiak/utils/fmrisim.py | 143 +++++++++++++++++--------------------- 1 file changed, 65 insertions(+), 78 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index a86e40230..137c19468 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -643,25 +643,25 @@ def export_epoch_file(stimfunction, # Cycle through the participants, different entries in the list epoch_file = [0] * len(stimfunction) - for participant_counter in range(len(stimfunction)): + for ppt_counter in range(len(stimfunction)): # What is the time course for the participant (binarized) - stimfunction_ppt = np.abs(stimfunction[participant_counter]) > 0 + stimfunction_ppt = np.abs(stimfunction[ppt_counter]) > 0 # Down sample the stim function stride = tr_duration * temporal_resolution stimfunction_downsampled = stimfunction_ppt[::int(stride), :] - # Calculates the number of event onsets. This uses changes in value to reflect - # different epochs. This might be false in some cases (the - # weight is non-uniform over an epoch or there is no - # break between identically weighted epochs). + # Calculates the number of event onsets. This uses changes in value + # to reflect different epochs. This might be false in some cases (the + # weight is non-uniform over an epoch or there is no break between + # identically weighted epochs). 
epochs = 0 # Preset conditions = stimfunction_ppt.shape[1] for condition_counter in range(conditions): - weight_change = ( - np.diff(stimfunction_downsampled[:, condition_counter], 1, 0) != 0) + weight_change = (np.diff(stimfunction_downsampled[:, + condition_counter], 1, 0) != 0) # If the first or last events are 'on' then make these # represent a epoch change @@ -676,8 +676,7 @@ def export_epoch_file(stimfunction, trs = stimfunction_downsampled.shape[0] # Make a timing file for this participant - epoch_file[participant_counter] = np.zeros((conditions, - epochs, trs)) + epoch_file[ppt_counter] = np.zeros((conditions, epochs, trs)) # Cycle through conditions epoch_counter = 0 # Reset and count across conditions @@ -691,9 +690,8 @@ def export_epoch_file(stimfunction, stimfunction_downsampled[ tr_counter, condition_counter] == 1: # Add a one for this TR - epoch_file[participant_counter][condition_counter, - epoch_counter, - tr_counter] = 1 + epoch_file[ppt_counter][condition_counter, + epoch_counter, tr_counter] = 1 # Find the next non event value end_idx = np.where(stimfunction_downsampled[tr_counter:, @@ -702,9 +700,8 @@ def export_epoch_file(stimfunction, tr_idxs = list(range(tr_counter, tr_counter + end_idx)) # Add ones to all the trs within this event time frame - epoch_file[participant_counter][condition_counter, - epoch_counter, - tr_idxs] = 1 + epoch_file[ppt_counter][condition_counter, + epoch_counter, tr_idxs] = 1 # Start from this index tr_counter += end_idx @@ -716,7 +713,7 @@ def export_epoch_file(stimfunction, tr_counter += 1 # Convert to boolean - epoch_file[participant_counter] = epoch_file[participant_counter].astype('bool') + epoch_file[ppt_counter] = epoch_file[ppt_counter].astype('bool') # Save the file np.save(filename, epoch_file) @@ -1246,7 +1243,7 @@ def _calc_ARMA_noise(volume, sample_num : int How many voxels would you like to sample to calculate the AR values. The AR distribution of real data is approximately exponential maxing - at 1. 
From analyses across a number of participants, to get less + at 1. From analyses across a number of participants, to get less than 1% standard deviation of error from the true mean it is necessary to sample at least 1000 voxels. @@ -1290,7 +1287,7 @@ def _calc_ARMA_noise(volume, model = ARMA(demeaned_timecourse, [auto_reg_order, ma_order]) model_fit = model.fit(disp=False) params = model_fit.params - except: + except ValueError: params = np.ones(auto_reg_order + ma_order + 1) * np.nan # Add to the list @@ -1298,12 +1295,13 @@ def _calc_ARMA_noise(volume, ma_all[voxel_counter, :] = params[auto_reg_order + 1:] # Average all of the values and then convert them to a list - auto_reg_rho =np.nanmean(auto_reg_rho_all, 0).tolist() + auto_reg_rho = np.nanmean(auto_reg_rho_all, 0).tolist() ma_rho = np.nanmean(ma_all, 0).tolist() # Return the coefficients return auto_reg_rho, ma_rho + def calc_noise(volume, mask, template, @@ -1687,10 +1685,9 @@ def _generate_noise_temporal_autoregression(timepoints, # This code assumes that the AR order is higher than the MA order if ma_order > auto_reg_order: - err_str = 'MA order (' + str(ma_order) +') is greater than AR order ' \ - '('+ str(auto_reg_order) + \ - '). Cannot run.' - raise ValueError(err_str) + msg = 'MA order (%d) is greater than AR order (%d). Cannot run.' 
% ( + ma_order, auto_reg_order) + raise ValueError(msg) # Generate a random variable at each time point that is a decayed value # of the previous time points @@ -1702,9 +1699,9 @@ def _generate_noise_temporal_autoregression(timepoints, # Create a brain shaped volume with appropriate smoothing properties noise = _generate_noise_spatial(dimensions=dimensions, - mask=mask, - fwhm=noise_dict['fwhm'], - ) + mask=mask, + fwhm=noise_dict['fwhm'], + ) if tr_counter == 0: noise_autoregression[:, :, :, tr_counter] = noise @@ -1920,8 +1917,8 @@ def _Pk2(idxs, sigma): amplitude of the fft coefficients """ - # The first set of idxs ought to be zero so make the first value zero to - # avoid a divide by zero error + # The first set of idxs ought to be zero so make the first value + # zero to avoid a divide by zero error amp_start = np.array((0)) # Compute the amplitude of the function for a series of indices @@ -2047,7 +2044,7 @@ def _generate_noise_temporal(stimfunction_tr, # Calculate the physiological time course noise = _generate_noise_temporal_phys(timepoints, - ) + ) # Create a brain shaped volume with similar smoothing properties volume = _generate_noise_spatial(dimensions=dimensions, @@ -2256,34 +2253,22 @@ def _noise_dict_update(noise_dict): Updated dictionary """ + # Create the default dictionary + default_dict = {'task_sigma': 0, 'drift_sigma':0,'auto_reg_sigma': 1, + 'auto_reg_rho' : [0.5], 'ma_rho': [0.0], + 'physiological_sigma': 0, 'sfnr': 90, 'snr': 50, + 'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0], + 'fwhm': 4, 'matched': 1} + + # Get the default keys + default_keys = default_dict.keys() # Check what noise is in the dictionary and add if necessary. 
Numbers # determine relative proportion of noise - if 'task_sigma' not in noise_dict: - noise_dict['task_sigma'] = 0 - if 'drift_sigma' not in noise_dict: - noise_dict['drift_sigma'] = 0 - if 'auto_reg_sigma' not in noise_dict: - noise_dict['auto_reg_sigma'] = 1 - if 'auto_reg_rho' not in noise_dict: - noise_dict['auto_reg_rho'] = [0.5] - if 'ma_rho' not in noise_dict: - noise_dict['ma_rho'] = [0.0] - if 'physiological_sigma' not in noise_dict: - noise_dict['physiological_sigma'] = 0 - if 'sfnr' not in noise_dict: - noise_dict['sfnr'] = 90 - if 'snr' not in noise_dict: - noise_dict['snr'] = 50 - if 'max_activity' not in noise_dict: - noise_dict['max_activity'] = 1000 - if 'voxel_size' not in noise_dict: - noise_dict['voxel_size'] = [1.0, 1.0, 1.0] - if 'fwhm' not in noise_dict: - noise_dict['fwhm'] = 4 - if 'matched' not in noise_dict: - noise_dict['matched'] = 0 + for default_key in default_dict: + if default_key not in noise_dict: + noise_dict[default_key] = default_dict[default_key] return noise_dict @@ -2315,8 +2300,9 @@ def _fit_spatial(noise, What is the duration, in seconds, of each TR? template : 3d array, float - A continuous (0 -> 1) volume describing the likelihood a voxel is in - the brain. This can be used to contrast the brain and non brain. + A continuous (0 -> 1) volume describing the likelihood a voxel + is in the brain. This can be used to contrast the brain and non + brain. mask : 3d array, binary The mask of the brain volume, distinguishing brain from non-brain @@ -2340,8 +2326,8 @@ def _fit_spatial(noise, fit the parameters to match the participant as best as possible. fit_thresh : float - What proportion of the target parameter value is sufficient error to - warrant finishing fit search. + What proportion of the target parameter value is sufficient + error to warrant finishing fit search. 
fit_delta : float How much are the parameters attenuated during the fitting process, @@ -2349,11 +2335,11 @@ def _fit_spatial(noise, parameter and the actual parameter iterations : int - The first element is how many steps of fitting the SFNR and SNR values - will be performed. Usually converges after < 5. The second element - is the number of iterations for the AR fitting. This is much more - time consuming (has to make a new timecourse on each iteration) so - be careful about setting this appropriately. + The first element is how many steps of fitting the SFNR and SNR + values will be performed. Usually converges after < 5. The + second element is the number of iterations for the AR fitting. + This is much more time consuming (has to make a new timecourse + on each iteration) so be careful about setting this appropriately. Returns ------- @@ -2445,8 +2431,9 @@ def _fit_temporal(noise, The mask of the brain volume, distinguishing brain from non-brain template : 3d array, float - A continuous (0 -> 1) volume describing the likelihood a voxel is in - the brain. This can be used to contrast the brain and non brain. + A continuous (0 -> 1) volume describing the likelihood a voxel + is in the brain. This can be used to contrast the brain and non + brain. stimfunction_tr : Iterable, list When do the stimuli events occur. Each element is a TR @@ -2459,11 +2446,11 @@ def _fit_temporal(noise, generated temporal_proportion, float - What is the proportion of the temporal variance (as specified by the - SFNR noise parameter) that is accounted for by the system noise. If - this number is high then all of the temporal variability is due to - system noise, if it is low then all of the temporal variability is - due to brain variability. + What is the proportion of the temporal variance (as specified by + the SFNR noise parameter) that is accounted for by the system + noise. 
If this number is high then all of the temporal + variability is due to system noise, if it is low then all of the + temporal variability is due to brain variability. temporal_sd : float What is the standard deviation in time of the noise volume to be @@ -2480,8 +2467,8 @@ def _fit_temporal(noise, fit the parameters to match the participant as best as possible. fit_thresh : float - What proportion of the target parameter value is sufficient error to - warrant finishing fit search. + What proportion of the target parameter value is sufficient + error to warrant finishing fit search. fit_delta : float How much are the parameters attenuated during the fitting process, @@ -2546,12 +2533,11 @@ def _fit_temporal(noise, # If the SFNR and AR is sufficiently close then break the loop if (abs(ar_diff) / target_ar) < fit_thresh and sfnr_diff < fit_thresh: - logger.info('Terminated AR fit after ' + str(iteration) + - ' iterations.') + msg = 'Terminated AR fit after ' + str(iteration) + ' iterations.' 
+ logger.info(msg) break ## Otherwise update the noise metrics - # Get the new temporal noise value temp_sd_new = mean_signal / new_sfnr temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) @@ -2600,6 +2586,7 @@ def _fit_temporal(noise, # Return the updated noise return noise + def generate_noise(dimensions, stimfunction_tr, tr_duration, @@ -2737,7 +2724,6 @@ def generate_noise(dimensions, spat_sd = mean_signal / noise_dict['snr'] spatial_sd = np.sqrt((spat_sd ** 2) * (1 - temporal_proportion)) - # Set up the machine noise noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, spatial_sd=spatial_sd, @@ -2745,8 +2731,7 @@ def generate_noise(dimensions, ) # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd)) + \ - noise_system + noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system # Reject negative values (only happens outside of the brain) noise[noise < 0] = 0 @@ -2782,6 +2767,7 @@ def generate_noise(dimensions, # Return the noise return noise + def compute_signal_change(signal_function, noise_function, noise_dict, @@ -2934,6 +2920,7 @@ def compute_signal_change(signal_function, # Return the scaled time course return signal_function_scaled + def plot_brain(fig, brain, mask=None, From f9676c97a033b8d206c1fac8b4b162c07ae06dd1 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 5 Jun 2018 14:08:14 -0400 Subject: [PATCH 27/51] PEP8 --- brainiak/utils/fmrisim.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 137c19468..1cf77ee98 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2254,18 +2254,14 @@ def _noise_dict_update(noise_dict): """ # Create the default dictionary - default_dict = {'task_sigma': 0, 'drift_sigma':0,'auto_reg_sigma': 1, - 'auto_reg_rho' : [0.5], 'ma_rho': [0.0], + default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1, + 'auto_reg_rho': [0.5], 'ma_rho': 
[0.0], 'physiological_sigma': 0, 'sfnr': 90, 'snr': 50, 'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0], 'fwhm': 4, 'matched': 1} - # Get the default keys - default_keys = default_dict.keys() - # Check what noise is in the dictionary and add if necessary. Numbers # determine relative proportion of noise - for default_key in default_dict: if default_key not in noise_dict: noise_dict[default_key] = default_dict[default_key] @@ -2537,8 +2533,7 @@ def _fit_temporal(noise, logger.info(msg) break - ## Otherwise update the noise metrics - # Get the new temporal noise value + # Otherwise update the noise metrics. Get the new temporal noise value temp_sd_new = mean_signal / new_sfnr temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) From 2a32b598219ae7f62e8b52637555dabbb85e76ac Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 5 Jun 2018 14:16:40 -0400 Subject: [PATCH 28/51] PEP8 --- tests/utils/test_fmrisim.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 8d7830fe8..49e99e283 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -286,7 +286,7 @@ def test_calc_noise(): event_durations = [6] tr_duration = 2 duration = 200 - temporal_res=100 + temporal_res = 100 tr_number = int(np.floor(duration / tr_duration)) dimensions_tr = np.array([10, 10, 10, tr_number]) @@ -358,4 +358,4 @@ def test_calc_noise(): ar1_diff = abs(nd_orig['auto_reg_rho'][0] - nd_new['auto_reg_rho'][0]) ar1_diff_match = abs(nd_orig['auto_reg_rho'][0] - nd_matched[ 'auto_reg_rho'][0]) - assert ar1_diff > ar1_diff_match, 'AR1 fit incorrectly' \ No newline at end of file + assert ar1_diff > ar1_diff_match, 'AR1 fit incorrectly' From be7253d81dcaef5f845483e53a3191df497c0e9c Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Wed, 6 Jun 2018 19:13:11 -0400 Subject: [PATCH 29/51] Removed plot_brain because other tools like nilearn do a much better job at plotting data. 
Minor edits and tweaks based on testing --- brainiak/utils/fmrisim.py | 128 +++++--------------------------------- 1 file changed, 15 insertions(+), 113 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 1cf77ee98..6fc4680dc 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -70,11 +70,6 @@ Convert the signal function into useful metric units according to metrics used by others (Welvaert & Rosseel, 2013) -plot_brain -Display the brain, timepoint by timepoint, with above threshold voxels -highlighted against the outline of the brain. - - Authors: Cameron Ellis (Princeton) 2016-2017 Chris Baldassano (Princeton) 2016-2017 @@ -1129,7 +1124,6 @@ def _calc_snr(volume, mask, dilation=5, tr=None, - template_baseline=None, ): """ Calculate the the SNR of a volume Calculates the Signal to Noise Ratio, the mean of brain voxels @@ -1158,18 +1152,6 @@ def _calc_snr(volume, tr : int Integer specifying TR to calculate the SNR for - template_baseline : 3d array, float - Do you want to subtract the baseline (a.k.a. temporal mean - activation, the template), to observe the noise in addition to this - in peripherial regions? Using this procedure means that only noise - that is not average is modeled. It is particularly valuable for the - simulator because the machine noise is added to the template. - However, the noise profile is different when you subtract (it - becomes somewhat gaussian). Moreover, the amount of noise - perturbation reflects the baseline value (voxels that are high on - average across time have more noise than voxels that have low - averages), If this is 'None' then this subtraction won't occur. 
- Returns ------- @@ -1199,11 +1181,6 @@ def _calc_snr(volume, brain_voxels = np.mean(brain_voxels, 1) nonbrain_voxels = np.mean(nonbrain_voxels, 3) - # # Do you want to remove the average of the periphery (removes - # # structure, leaving only variability) - # if template_baseline is not None: - # nonbrain_voxels -= template_baseline - nonbrain_voxels = nonbrain_voxels[mask_dilated == 0] # Take the means of each voxel over time @@ -1344,7 +1321,7 @@ def calc_noise(volume, # Create the mask if not supplied and set the mask size if mask is None: - mask = np.ones(volume.shape[:-1]) + raise ValueError('Mask not supplied') # Update noise dict if it is not yet created if noise_dict is None: @@ -1394,7 +1371,6 @@ def calc_noise(volume, noise_dict['fwhm'] = np.mean(fwhm) noise_dict['snr'] = _calc_snr(volume, mask, - template_baseline=template, ) # Return the noise dictionary @@ -1543,7 +1519,7 @@ def _generate_noise_temporal_task(stimfunction_tr, noise_task = stimfunction_tr + noise # Normalize - noise_task = stats.zscore(noise_task) + noise_task = stats.zscore(noise_task).flatten() return noise_task @@ -1634,7 +1610,6 @@ def _generate_noise_temporal_drift(trs, def _generate_noise_temporal_autoregression(timepoints, noise_dict, dimensions, - template, mask, ): @@ -1661,10 +1636,6 @@ def _generate_noise_temporal_autoregression(timepoints, dimensions : 3 length array, int What is the shape of the volume to be generated - template : 3d array, float - A continuous (0 -> 1) volume describing the likelihood a voxel is in - the brain. This can be used to contrast the brain and non brain. 
- mask : 3 dimensional array, binary The masked brain, thresholded to distinguish brain and non-brain @@ -1761,10 +1732,10 @@ def _generate_noise_temporal_phys(timepoints, What time points, in seconds, are sampled by a TR resp_freq : float - What is the frequency of respiration + What is the frequency of respiration (in s) heart_freq : float - What is the frequency of heart beat + What is the frequency of heart beat (in s) Returns ---------- @@ -1773,18 +1744,20 @@ def _generate_noise_temporal_phys(timepoints, """ - noise_phys = [] # Preset resp_phase = (np.random.rand(1) * 2 * np.pi)[0] heart_phase = (np.random.rand(1) * 2 * np.pi)[0] - for tr_counter in timepoints: - # Calculate the radians for each variable at this - # given TR - resp_radians = resp_freq * tr_counter * 2 * np.pi + resp_phase - heart_radians = heart_freq * tr_counter * 2 * np.pi + heart_phase + # Find the rate for each timepoint + resp_rate = (resp_freq * 2 * np.pi) + heart_rate = (heart_freq * 2 * np.pi) - # Combine the two types of noise and append - noise_phys.append(np.cos(resp_radians) + np.sin(heart_radians)) + # Calculate the radians for each variable at this + # given TR + resp_radians = np.multiply(timepoints, resp_rate) + resp_phase + heart_radians = np.multiply(timepoints, heart_rate) + heart_phase + + # Combine the two types of noise and append + noise_phys = np.cos(resp_radians) + np.sin(heart_radians) # Normalize noise_phys = stats.zscore(noise_phys) @@ -2063,7 +2036,6 @@ def _generate_noise_temporal(stimfunction_tr, noise = _generate_noise_temporal_autoregression(timepoints, noise_dict, dimensions, - template, mask, ) @@ -2358,7 +2330,7 @@ def _fit_spatial(noise, for iteration in list(range(iterations)): # Calculate the new metrics - new_snr = _calc_snr(noise, mask, template_baseline=template) + new_snr = _calc_snr(noise, mask) # Calculate the difference between the real and simulated data diff_snr = abs(new_snr - target_snr) / target_snr @@ -2915,73 +2887,3 @@ def 
compute_signal_change(signal_function, # Return the scaled time course return signal_function_scaled - -def plot_brain(fig, - brain, - mask=None, - percentile=99, - ): - """ Display the brain that has been generated with a given threshold - Will display the voxels above the given percentile and then a shadow of - all voxels in the mask - - Parameters - ---------- - - fig : matplotlib object - The figure to be displayed, generated from matplotlib. import - matplotlib.pyplot as plt; fig = plt.figure() - - brain : 3d array - This is a 3d array with the neural data - - mask : 3d array - A binary mask describing the location that you want to specify as - - percentile : float - What percentage of voxels will be included? Based on the values - supplied - - Returns - ---------- - ax : matplotlib object - Object with the information to be plotted - - """ - - ax = fig.add_subplot(111, projection='3d') - - # Threshold the data - threshold = np.percentile(brain.reshape(np.prod(brain.shape[0:3])), - percentile) - - # How many voxels exceed a threshold - brain_threshold = np.where(np.abs(brain) > threshold) - - # Clear the way - ax.clear() - - ax.set_xlim(0, brain.shape[0]) - ax.set_ylim(0, brain.shape[1]) - ax.set_zlim(0, brain.shape[2]) - - # If a mask is provided then plot this - if mask is not None: - mask_threshold = np.where(np.abs(mask) > 0) - ax.scatter(mask_threshold[0], - mask_threshold[1], - mask_threshold[2], - zdir='z', - c='black', - s=10, - alpha=0.01) - - # Plot the volume - ax.scatter(brain_threshold[0], - brain_threshold[1], - brain_threshold[2], - zdir='z', - c='red', - s=20) - - return ax From 2a1ca856a6affc1744feccdc22e3fc66141a4eca Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Wed, 6 Jun 2018 19:13:34 -0400 Subject: [PATCH 30/51] Substantial increase in coverage --- tests/utils/test_fmrisim.py | 346 +++++++++++++++++++++++++++++++++++- 1 file changed, 343 insertions(+), 3 deletions(-) diff --git a/tests/utils/test_fmrisim.py 
b/tests/utils/test_fmrisim.py index 49e99e283..e1271ad91 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -21,7 +21,7 @@ import numpy as np import math from brainiak.utils import fmrisim as sim - +import pytest def test_generate_signal(): @@ -29,8 +29,7 @@ def test_generate_signal(): dimensions = np.array([10, 10, 10]) # What is the size of the brain feature_size = [3] feature_type = ['cube'] - feature_coordinates = np.array( - [[5, 5, 5]]) + feature_coordinates = np.array([[5, 5, 5]]) signal_magnitude = [30] # Generate a volume representing the location and quality of the signal @@ -51,6 +50,7 @@ def test_generate_signal(): feature_coordinates = np.array( [[5, 5, 5], [3, 3, 3], [7, 7, 7]]) + # Check feature size is correct volume = sim.generate_signal(dimensions=dimensions, feature_coordinates=feature_coordinates, feature_type=['loop', 'cavity', 'sphere'], @@ -60,6 +60,35 @@ def test_generate_signal(): assert volume[3, 3, 3] == 0, "Cavity is empty" assert volume[7, 7, 7] != 0, "Sphere is not empty" + # Check feature size manipulation + volume = sim.generate_signal(dimensions=dimensions, + feature_coordinates=feature_coordinates, + feature_type=['loop', 'cavity', 'sphere'], + feature_size=[1], + signal_magnitude=signal_magnitude) + assert volume[5, 6, 6] == 0, "Loop is too big" + assert volume[3, 5, 5] == 0, "Cavity is too big" + assert volume[7, 9, 9] == 0, "Sphere is too big" + + # Check that out of bounds feature coordinates are corrected + feature_coordinates = np.array([0, 2, dimensions[2]]) + x, y, z = sim._insert_idxs(feature_coordinates, feature_size[0], + dimensions) + assert x[1] - x[0] == 2, "x min not corrected" + assert y[1] - y[0] == 3, "y was corrected when it shouldn't be" + assert z[1] - z[0] == 1, "z max not corrected" + + # Check that signal patterns are created + feature_coordinates = np.array([[5, 5, 5]]) + volume = sim.generate_signal(dimensions=dimensions, + feature_coordinates=feature_coordinates, + 
feature_type=feature_type, + feature_size=feature_size, + signal_magnitude=signal_magnitude, + signal_constant=0, + ) + assert volume[4:7, 4:7, 4:7].std() > 0, "Signal is constant" + def test_generate_stimfunction(): @@ -104,6 +133,68 @@ def test_generate_stimfunction(): assert 25 < max_response <= 30, "HRF has the incorrect length" assert np.sum(signal_function < 0) > 0, "No values below zero" + # Export a stimfunction + sim.export_3_column(stimfunction, + 'temp.txt', + ) + + # Load in the stimfunction + stimfunc_new = sim.generate_stimfunction(onsets=None, + event_durations=None, + total_time=duration, + timing_file='temp.txt', + ) + + assert np.all(stimfunc_new == stimfunction), "Export/import failed" + + # Break the timing precision of the generation + stimfunc_new = sim.generate_stimfunction(onsets=None, + event_durations=None, + total_time=duration, + timing_file='temp.txt', + temporal_resolution=0.5, + ) + + assert stimfunc_new.sum() == 0, "Temporal resolution not working right" + + # Set the duration to be too short so you should get an error + onsets = [10, 30, 50, 70, 90] + event_durations = [5] + with pytest.raises(ValueError): + sim.generate_stimfunction(onsets=onsets, + event_durations=event_durations, + total_time=89, + ) + + # Clip the event offset + stimfunc_new = sim.generate_stimfunction(onsets=onsets, + event_durations=event_durations, + total_time=95, + ) + assert stimfunc_new[-1] == 1, 'Event offset was not clipped' + + # Test exporting a group of participants to an epoch file + cond_a = sim.generate_stimfunction(onsets=onsets, + event_durations=event_durations, + total_time=110, + ) + + cond_b = sim.generate_stimfunction(onsets=[x + 5 for x in onsets], + event_durations=event_durations, + total_time=110, + ) + + stimfunction_group = [np.hstack((cond_a, cond_b))] * 2 + sim.export_epoch_file(stimfunction_group, + 'temp.txt', + tr_duration, + ) + + # Check that convolve throws a warning when the shape is wrong + _ = 
sim.convolve_hrf(stimfunction=np.hstack((cond_a, cond_b)).T, + tr_duration=tr_duration, + temporal_resolution=1, + ) def test_apply_signal(): @@ -138,6 +229,74 @@ def test_apply_signal(): tr_duration=tr_duration, ) + # Check that you can compute signal change appropriately + # Preset a bunch of things + stimfunction_tr = stimfunction[::int(tr_duration * 100)] + mask, template = sim.mask_brain(dimensions, mask_self=False) + noise_dict = sim._noise_dict_update({}) + noise = sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict=noise_dict, + iterations=[0, 0] + ) + coords = feature_coordinates[0] + noise_function = noise[coords[0], coords[1], coords[2], :] + noise_function = noise_function.reshape(duration // tr_duration, 1) + + + # Create the calibrated signal with PSC + magnitude = [0.5] + sig_a = sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'PSC', + ) + magnitude = [1] + sig_b = sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'PSC', + ) + + assert (abs(sig_b) - abs(sig_a)).min() >= 0, 'Magnitude modulation failed' + + # Check the other signal change metrics + _ = sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'SFNR', + ) + _ = sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Amp/Noise-SD', + ) + _ = sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Amp2/Noise-Var_dB', + ) + _ = sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Signal-SD/Noise-SD', + ) + _ = sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Signal-Var/Noise-Var_dB', + ) + # Convolve the HRF with the stimulus sequence signal = sim.apply_signal(signal_function=signal_function, volume_signal=volume, @@ 
-153,6 +312,15 @@ def test_apply_signal(): assert np.any(signal == signal_magnitude), "The stimfunction is not binary" + # Check that there is an error if the number of signal voxels doesn't + # match the number of non zero brain voxels + with pytest.raises(IndexError): + sig_vox = (volume > 0).sum() + vox_pattern = np.tile(stimfunction, (1, sig_vox - 1)) + sim.apply_signal(signal_function=vox_pattern, + volume_signal=volume, + ) + def test_generate_noise(): @@ -235,6 +403,124 @@ def test_generate_noise(): assert system_low < system_high, "SFNR noise could not be manipulated" + # Check that you check for the appropriate template values + with pytest.raises(ValueError): + sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template * 2, + mask=mask, + noise_dict={}, + ) + + # Check that iterations does what it should + noise = sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict={}, + iterations=[0, 0], + ) + + noise = sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict={}, + iterations=None, + ) + + # Test drift noise + trs = 1000 + period = 100 + drift = sim._generate_noise_temporal_drift(trs, + tr_duration, + 'sine', + period, + ) + + # Check that the max frequency is the appropriate frequency + power = abs(np.fft.fft(drift))[1:trs // 2] + freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs + period_freq = np.where(freq == 1 / (period // tr_duration)) + max_freq = np.argmax(power) + + assert period_freq == max_freq, 'Max frequency is not where it should be' + + # Do the same but now with cosine basis functions, answer should be close + drift = sim._generate_noise_temporal_drift(trs, + tr_duration, + 'discrete_cos', + period, + ) + + # Check that the appropriate frequency is peaky (may not be the max) + 
power = abs(np.fft.fft(drift))[1:trs // 2] + freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs + period_freq = np.where(freq == 1 / (period // tr_duration))[0][0] + + assert power[period_freq] > power[period_freq + 1], 'Power is low' + assert power[period_freq] > power[period_freq - 1], 'Power is low' + + # Check it gives a warning if the duration is too short + drift = sim._generate_noise_temporal_drift(50, + tr_duration, + 'discrete_cos', + period, + ) + + # Test physiological noise (using unrealistic parameters so that it's easy) + timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs)) + resp_freq = 0.2 + heart_freq = 1.17 + phys = sim._generate_noise_temporal_phys(timepoints, + resp_freq, + heart_freq, + ) + + # Check that the max frequency is the appropriate frequency + power = abs(np.fft.fft(phys))[1:trs // 2] + freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / (trs * tr_duration) + peaks = (power>(power.mean()+power.std())) # Where are the peaks + peak_freqs = freq[peaks] + + assert np.any(resp_freq == peak_freqs), 'Resp frequency not where it ' \ + 'should be' + assert len(peak_freqs) == 2, 'Two peaks not found' + + # Test task noise + task = sim._generate_noise_temporal_task(stimfunction_tr, + motion_noise='gaussian', + ) + task = sim._generate_noise_temporal_task(stimfunction_tr, + motion_noise='rician', + ) + + # Test ARMA noise + with pytest.raises(ValueError): + noise_dict={'fwhm': 4, 'auto_reg_rho': [1], 'ma_rho': [1, 1]} + sim._generate_noise_temporal_autoregression(stimfunction_tr, + noise_dict, + dimensions, + mask, + ) + + # Generate spatial noise + with pytest.raises(IndexError): + sim._generate_noise_spatial(np.array([10, 10, 10, trs])) + + # Turn all of the noise types on + noise_dict = {'physiological_sigma': 1, 'drift_sigma': 1, 'task_sigma': 1,} + sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict=noise_dict, + ) + def 
test_mask_brain(): @@ -278,6 +564,20 @@ def test_mask_brain(): assert np.sum(brain != 0) < np.sum(volume != 0), "Masking did not work" + # Test that you can load the default + dimensions = np.array([100, 100, 100]) + mask, template = sim.mask_brain(dimensions, mask_self=False) + + assert mask[20, 80, 50] == 0, 'Masking didn''t work' + assert mask[25, 80, 50] == 1, 'Masking didn''t work' + assert int(template[25, 80, 50] * 100) == 57, 'Template not correct' + + # Check that you can mask self + mask_self, template_self = sim.mask_brain(template, mask_self=True) + + assert (template_self - template).sum() < 1e2, 'Mask self error' + assert (mask_self - mask).sum() == 0, 'Mask self error' + def test_calc_noise(): @@ -333,11 +633,43 @@ def test_calc_noise(): assert nd_new['sfnr'] > 0, 'sfnr out of range' assert nd_new['auto_reg_rho'][0] > 0, 'ar out of range' + # Check that the dilation increases SNR + no_dilation_snr = sim._calc_snr(noise_matched, + mask, + dilation=0, + tr=tr_duration, + ) + + assert nd_new['snr'] > no_dilation_snr, "Dilation did not increase SNR" + + # Check that template size is in bounds + with pytest.raises(ValueError): + sim.calc_noise(noise, mask, template * 2) + + # Check that Mask is set is checked + with pytest.raises(ValueError): + sim.calc_noise(noise, None, template) + + # Check that it can deal with missing noise parameters + temp_nd = sim.calc_noise(noise, mask, template, noise_dict={}) + assert temp_nd['voxel_size'][0] == 1, 'Default voxel size not set' + + temp_nd = sim.calc_noise(noise, mask, template, noise_dict=None) + assert temp_nd['voxel_size'][0] == 1, 'Default voxel size not set' + # Check that the fitting worked snr_diff = abs(nd_orig['snr'] - nd_new['snr']) snr_diff_match = abs(nd_orig['snr'] - nd_matched['snr']) assert snr_diff > snr_diff_match, 'snr fit incorrectly' + # Test that you can generate rician and exponential noise + sim._generate_noise_system(dimensions_tr, + 1, + 1, + spatial_noise_type = 'exponential', + 
temporal_noise_type = 'rician', + ) + # Check the temporal noise match nd_orig['matched'] = 1 noise_matched = sim.generate_noise(dimensions=dimensions_tr[0:3], @@ -359,3 +691,11 @@ def test_calc_noise(): ar1_diff_match = abs(nd_orig['auto_reg_rho'][0] - nd_matched[ 'auto_reg_rho'][0]) assert ar1_diff > ar1_diff_match, 'AR1 fit incorrectly' + + # Check that you can calculate ARMA for a single voxel + vox = noise[5, 5, 5, :] + arma = sim._calc_ARMA_noise(vox, + None, + sample_num=2, + ) + assert len(arma) == 2, "Two outputs not given by ARMA" From 492f522f9250f6d751f23eff6b5f2e4a2a630c6d Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Wed, 6 Jun 2018 22:15:30 -0400 Subject: [PATCH 31/51] Removed plot_brain because other tools like nilearn do a much better job at plotting data. Minor edits and tweaks based on testing --- brainiak/utils/fmrisim.py | 1 - 1 file changed, 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 6fc4680dc..5e41cae7f 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -97,7 +97,6 @@ "generate_noise", "mask_brain", "compute_signal_change", - "plot_brain", ] logger = logging.getLogger(__name__) From c15c2a407b5215f7c28fff8f7f7b1cceae4e0b12 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Wed, 6 Jun 2018 22:45:45 -0400 Subject: [PATCH 32/51] PEP8 --- brainiak/utils/fmrisim.py | 1 - 1 file changed, 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 5e41cae7f..b9eeba2f4 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2885,4 +2885,3 @@ def compute_signal_change(signal_function, # Return the scaled time course return signal_function_scaled - From e916a3dac1946f102e1e0baa6ce0b2b06e7d7034 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Wed, 6 Jun 2018 23:15:08 -0400 Subject: [PATCH 33/51] PEP8 --- tests/utils/test_fmrisim.py | 128 ++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 64 deletions(-) diff --git 
a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index e1271ad91..0b2829d1e 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -23,6 +23,7 @@ from brainiak.utils import fmrisim as sim import pytest + def test_generate_signal(): # Inputs for generate_signal @@ -191,10 +192,11 @@ def test_generate_stimfunction(): ) # Check that convolve throws a warning when the shape is wrong - _ = sim.convolve_hrf(stimfunction=np.hstack((cond_a, cond_b)).T, - tr_duration=tr_duration, - temporal_resolution=1, - ) + sim.convolve_hrf(stimfunction=np.hstack((cond_a, cond_b)).T, + tr_duration=tr_duration, + temporal_resolution=1, + ) + def test_apply_signal(): @@ -246,7 +248,6 @@ def test_apply_signal(): noise_function = noise[coords[0], coords[1], coords[2], :] noise_function = noise_function.reshape(duration // tr_duration, 1) - # Create the calibrated signal with PSC magnitude = [0.5] sig_a = sim.compute_signal_change(signal_function, @@ -266,36 +267,36 @@ def test_apply_signal(): assert (abs(sig_b) - abs(sig_a)).min() >= 0, 'Magnitude modulation failed' # Check the other signal change metrics - _ = sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'SFNR', - ) - _ = sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Amp/Noise-SD', - ) - _ = sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Amp2/Noise-Var_dB', - ) - _ = sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Signal-SD/Noise-SD', - ) - _ = sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Signal-Var/Noise-Var_dB', - ) + sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'SFNR', + ) + sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Amp/Noise-SD', + ) + 
sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Amp2/Noise-Var_dB', + ) + sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Signal-SD/Noise-SD', + ) + sim.compute_signal_change(signal_function, + noise_function, + noise_dict, + magnitude, + 'CNR_Signal-Var/Noise-Var_dB', + ) # Convolve the HRF with the stimulus sequence signal = sim.apply_signal(signal_function=signal_function, @@ -414,23 +415,23 @@ def test_generate_noise(): ) # Check that iterations does what it should - noise = sim.generate_noise(dimensions=dimensions, - stimfunction_tr=stimfunction_tr, - tr_duration=tr_duration, - template=template, - mask=mask, - noise_dict={}, - iterations=[0, 0], - ) + sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict={}, + iterations=[0, 0], + ) - noise = sim.generate_noise(dimensions=dimensions, - stimfunction_tr=stimfunction_tr, - tr_duration=tr_duration, - template=template, - mask=mask, - noise_dict={}, - iterations=None, - ) + sim.generate_noise(dimensions=dimensions, + stimfunction_tr=stimfunction_tr, + tr_duration=tr_duration, + template=template, + mask=mask, + noise_dict={}, + iterations=None, + ) # Test drift noise trs = 1000 @@ -483,24 +484,23 @@ def test_generate_noise(): # Check that the max frequency is the appropriate frequency power = abs(np.fft.fft(phys))[1:trs // 2] freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / (trs * tr_duration) - peaks = (power>(power.mean()+power.std())) # Where are the peaks + peaks = (power > (power.mean() + power.std())) # Where are the peaks peak_freqs = freq[peaks] - assert np.any(resp_freq == peak_freqs), 'Resp frequency not where it ' \ - 'should be' + assert np.any(resp_freq == peak_freqs), 'Resp frequency not found' assert len(peak_freqs) == 2, 'Two peaks not found' # Test task noise - task = 
sim._generate_noise_temporal_task(stimfunction_tr, - motion_noise='gaussian', - ) - task = sim._generate_noise_temporal_task(stimfunction_tr, - motion_noise='rician', - ) + sim._generate_noise_temporal_task(stimfunction_tr, + motion_noise='gaussian', + ) + sim._generate_noise_temporal_task(stimfunction_tr, + motion_noise='rician', + ) # Test ARMA noise with pytest.raises(ValueError): - noise_dict={'fwhm': 4, 'auto_reg_rho': [1], 'ma_rho': [1, 1]} + noise_dict = {'fwhm': 4, 'auto_reg_rho': [1], 'ma_rho': [1, 1]} sim._generate_noise_temporal_autoregression(stimfunction_tr, noise_dict, dimensions, @@ -512,7 +512,7 @@ def test_generate_noise(): sim._generate_noise_spatial(np.array([10, 10, 10, trs])) # Turn all of the noise types on - noise_dict = {'physiological_sigma': 1, 'drift_sigma': 1, 'task_sigma': 1,} + noise_dict = {'physiological_sigma': 1, 'drift_sigma': 1, 'task_sigma': 1} sim.generate_noise(dimensions=dimensions, stimfunction_tr=stimfunction_tr, tr_duration=tr_duration, @@ -666,8 +666,8 @@ def test_calc_noise(): sim._generate_noise_system(dimensions_tr, 1, 1, - spatial_noise_type = 'exponential', - temporal_noise_type = 'rician', + spatial_noise_type='exponential', + temporal_noise_type='rician', ) # Check the temporal noise match From 5d1193ff4952f5271702c4051e3d9d01f30fe5a0 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 7 Jun 2018 10:28:51 -0400 Subject: [PATCH 34/51] Cut iterations because you can't fit easily with different noise parameters --- tests/utils/test_fmrisim.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 0b2829d1e..1170f2e56 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -511,14 +511,16 @@ def test_generate_noise(): with pytest.raises(IndexError): sim._generate_noise_spatial(np.array([10, 10, 10, trs])) - # Turn all of the noise types on - noise_dict = {'physiological_sigma': 1, 'drift_sigma': 1, 'task_sigma': 
1} + # Switch some of the noise types on + noise_dict = {'physiological_sigma': 1, 'drift_sigma': 1, 'task_sigma': + 1, 'auto_reg_sigma': 0} sim.generate_noise(dimensions=dimensions, stimfunction_tr=stimfunction_tr, tr_duration=tr_duration, template=template, mask=mask, noise_dict=noise_dict, + iterations=[0, 0], ) From 6dc19c9c381af758c5df80594dec24ec7bcea073 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 7 Jun 2018 10:46:08 -0400 Subject: [PATCH 35/51] PEP8 --- tests/utils/test_fmrisim.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 1170f2e56..d34be29d5 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -512,8 +512,8 @@ def test_generate_noise(): sim._generate_noise_spatial(np.array([10, 10, 10, trs])) # Switch some of the noise types on - noise_dict = {'physiological_sigma': 1, 'drift_sigma': 1, 'task_sigma': - 1, 'auto_reg_sigma': 0} + noise_dict = dict(physiological_sigma=1, drift_sigma=1, task_sigma=1, + auto_reg_sigma=0) sim.generate_noise(dimensions=dimensions, stimfunction_tr=stimfunction_tr, tr_duration=tr_duration, From 557158badeba06031c21a84564f9ffb5a80b6fc7 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 7 Jun 2018 11:16:53 -0400 Subject: [PATCH 36/51] PEP8 --- tests/utils/test_fmrisim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index d34be29d5..9f4d0e5b3 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -512,7 +512,7 @@ def test_generate_noise(): sim._generate_noise_spatial(np.array([10, 10, 10, trs])) # Switch some of the noise types on - noise_dict = dict(physiological_sigma=1, drift_sigma=1, task_sigma=1, + noise_dict = dict(physiological_sigma=1, drift_sigma=1, task_sigma=1, auto_reg_sigma=0) sim.generate_noise(dimensions=dimensions, stimfunction_tr=stimfunction_tr, From 
559da38adc257603d8de83fd5d7451ca6bfb584d Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 7 Jun 2018 14:00:59 -0400 Subject: [PATCH 37/51] Fix the error --- brainiak/utils/fmrisim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index b9eeba2f4..534363c4c 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1263,7 +1263,7 @@ def _calc_ARMA_noise(volume, model = ARMA(demeaned_timecourse, [auto_reg_order, ma_order]) model_fit = model.fit(disp=False) params = model_fit.params - except ValueError: + except LinAlgError: params = np.ones(auto_reg_order + ma_order + 1) * np.nan # Add to the list From fcb3d9486b57f155acceca3dad6c11cc6c9be9e3 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 7 Jun 2018 14:12:54 -0400 Subject: [PATCH 38/51] Fix the error --- brainiak/utils/fmrisim.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 534363c4c..c5d5b232e 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1242,6 +1242,10 @@ def _calc_ARMA_noise(volume, # timecourse brain_timecourse = volume.reshape(1, len(volume)) + # Create for later use + class LinAlgError(Exception): + pass + # Identify some brain voxels to assess voxel_idxs = list(range(brain_timecourse.shape[0])) np.random.shuffle(voxel_idxs) From 8646b1a6819e89ace8e64595b76c31fe65bfcb43 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Thu, 7 Jun 2018 14:16:09 -0400 Subject: [PATCH 39/51] Import class --- brainiak/utils/fmrisim.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index c5d5b232e..d210b9537 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -80,6 +80,7 @@ from statsmodels.tsa.arima_model import ARMA import math import numpy as np +from numpy.linalg import LinAlgError from pkg_resources import resource_stream from 
scipy import stats from scipy import signal @@ -1242,10 +1243,6 @@ def _calc_ARMA_noise(volume, # timecourse brain_timecourse = volume.reshape(1, len(volume)) - # Create for later use - class LinAlgError(Exception): - pass - # Identify some brain voxels to assess voxel_idxs = list(range(brain_timecourse.shape[0])) np.random.shuffle(voxel_idxs) From f0681e9c2e216060fa27818a100c30c739d589b6 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Fri, 8 Jun 2018 09:02:50 -0400 Subject: [PATCH 40/51] Fixed error exception --- brainiak/utils/fmrisim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index d210b9537..84a4d5cd5 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1264,7 +1264,7 @@ def _calc_ARMA_noise(volume, model = ARMA(demeaned_timecourse, [auto_reg_order, ma_order]) model_fit = model.fit(disp=False) params = model_fit.params - except LinAlgError: + except (ValueError, LinAlgError): params = np.ones(auto_reg_order + ma_order + 1) * np.nan # Add to the list From 98b94469bdb9f7a5a4aa1b445879a76c1a058a26 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 26 Jun 2018 13:36:40 -0400 Subject: [PATCH 41/51] Incorporated Mingbo's edits --- brainiak/utils/fmrisim.py | 59 ++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 10 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 84a4d5cd5..6e43b00d2 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -609,7 +609,9 @@ def export_epoch_file(stimfunction, file used in Brainiak. The epoch file is a way to structure the timing information in fMRI that allows you to flexibly input different stimulus sequences. This is a list with each entry a 3d matrix corresponding to a - participant. The dimensions of the 3d matrix are condition by epoch by time + participant. The dimensions of the 3d matrix are condition by epoch by + time. 
For the i-th condition, if its k-th epoch spans time points t_m to + t_n-1, then [i, k, t_m:t_n] are 1 in the epoch file. Parameters ---------- @@ -1221,8 +1223,8 @@ def _calc_ARMA_noise(volume, How many voxels would you like to sample to calculate the AR values. The AR distribution of real data is approximately exponential maxing at 1. From analyses across a number of participants, to get less - than 1% standard deviation of error from the true mean it is - necessary to sample at least 1000 voxels. + than 3% standard deviation of error from the true mean it is + necessary to sample at least 100 voxels. Returns ------- @@ -1674,6 +1676,9 @@ def _generate_noise_temporal_autoregression(timepoints, fwhm=noise_dict['fwhm'], ) + # Store all of the noise volumes + err_vols[:, :, :, tr_counter] = noise + if tr_counter == 0: noise_autoregression[:, :, :, tr_counter] = noise @@ -1698,9 +1703,6 @@ def _generate_noise_temporal_autoregression(timepoints, # then consider the error terms if ma_order >= pCounter: - # Collect the noise from the previous TRs - err_vols[:, :, :, tr_counter] = noise - # Pull out a previous TR past_noise = err_vols[:, :, :, past_TR] @@ -1732,10 +1734,10 @@ def _generate_noise_temporal_phys(timepoints, What time points, in seconds, are sampled by a TR resp_freq : float - What is the frequency of respiration (in s) + What is the frequency of respiration (in Hz) heart_freq : float - What is the frequency of heart beat (in s) + What is the frequency of heart beat (in Hz) Returns ---------- @@ -1819,7 +1821,7 @@ def _generate_noise_spatial(dimensions, # Check the input is correct if len(dimensions) == 4: - raise IndexError('4 dimensions have been supplied, only using 3') + logger.warning('4 dimensions have been supplied, only using 3') dimensions = dimensions[0:3] def _logfunc(x, a, b, c): @@ -2217,6 +2219,25 @@ def _noise_dict_update(noise_dict): parameter that describes how much noise these components contribute to the brain. 
If you set the noise dict to matched then it will fit the parameters to match the participant as best as possible. + The noise variables are as follows: + snr [float]: Size of the spatial noise + sfnr [float]: Size of the temporal noise. This is the total variability + that the following sigmas 'sum' to: + task_sigma [float]: Size of the variance of task specific noise + drift_sigma [float]: Size of the variance of drift noise + auto_reg_sigma [float]: Size of the variance of autoregressive + noise + physiological_sigma [float]: Size of the variance of + physiological noise + auto_reg_rho [list]: The coefficients of the autoregressive + components you are modeling + ma_rho [list]:The coefficients of the moving average components you + are modeling + max_activity [float]: The max value of the averaged brain in order + to reference the template + voxel_size [list]: The mm size of the voxels + fwhm [float]: The gaussian smoothing kernel size (mm) + matched [bool]: Specify whether you are fitting the noise parameters Returns ------- @@ -2593,7 +2614,25 @@ def generate_noise(dimensions, noise_dict : dictionary, float This is a dictionary which describes the noise parameters of the data. If there are no other variables provided then it will use - default values + default values. The noise variables are as follows: + snr [float]: Size of the spatial noise + sfnr [float]: Size of the temporal noise. 
This is the total variability + that the following sigmas 'sum' to: + task_sigma [float]: Size of the variance of task specific noise + drift_sigma [float]: Size of the variance of drift noise + auto_reg_sigma [float]: Size of the variance of autoregressive + noise + physiological_sigma [float]: Size of the variance of + physiological noise + auto_reg_rho [list]: The coefficients of the autoregressive + components you are modeling + ma_rho [list]:The coefficients of the moving average components you + are modeling + max_activity [float]: The max value of the averaged brain in order + to reference the template + voxel_size [list]: The mm size of the voxels + fwhm [float]: The gaussian smoothing kernel size (mm) + matched [bool]: Specify whether you are fitting the noise parameters temporal_proportion, float What is the proportion of the temporal variance (as specified by the From 6e90c17f6f0c9849d222bd21ab4b5fb08c6b2e3d Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 26 Jun 2018 14:12:26 -0400 Subject: [PATCH 42/51] Updated smoothing dimensions test --- tests/utils/test_fmrisim.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 9f4d0e5b3..21466b658 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -369,6 +369,7 @@ def test_generate_noise(): assert len(np.unique(template) > 2), "Template creation did not work" stimfunction_tr = stimfunction[::int(tr_duration * 100)] + # Create the noise volumes (using the default parameters) noise = sim.generate_noise(dimensions=dimensions, stimfunction_tr=stimfunction_tr, @@ -508,8 +509,8 @@ def test_generate_noise(): ) # Generate spatial noise - with pytest.raises(IndexError): - sim._generate_noise_spatial(np.array([10, 10, 10, trs])) + vol = sim._generate_noise_spatial(np.array([10, 10, 10, trs])) + assert len(vol.shape) == 3, 'Volume was not reshaped to ignore TRs' # Switch some of the noise types on 
noise_dict = dict(physiological_sigma=1, drift_sigma=1, task_sigma=1, From e52d0f0b8002204c5fc1733d1a37a8536706b9e7 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Tue, 26 Jun 2018 15:12:47 -0400 Subject: [PATCH 43/51] Indent error --- brainiak/utils/fmrisim.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 6e43b00d2..baba7799d 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2223,12 +2223,13 @@ def _noise_dict_update(noise_dict): snr [float]: Size of the spatial noise sfnr [float]: Size of the temporal noise. This is the total variability that the following sigmas 'sum' to: - task_sigma [float]: Size of the variance of task specific noise - drift_sigma [float]: Size of the variance of drift noise - auto_reg_sigma [float]: Size of the variance of autoregressive - noise - physiological_sigma [float]: Size of the variance of - physiological noise + + task_sigma [float]: Size of the variance of task specific noise + drift_sigma [float]: Size of the variance of drift noise + auto_reg_sigma [float]: Size of the variance of autoregressive noise + physiological_sigma [float]: Size of the variance of physiological + noise + auto_reg_rho [list]: The coefficients of the autoregressive components you are modeling ma_rho [list]:The coefficients of the moving average components you @@ -2618,12 +2619,13 @@ def generate_noise(dimensions, snr [float]: Size of the spatial noise sfnr [float]: Size of the temporal noise. 
This is the total variability that the following sigmas 'sum' to: - task_sigma [float]: Size of the variance of task specific noise - drift_sigma [float]: Size of the variance of drift noise - auto_reg_sigma [float]: Size of the variance of autoregressive - noise - physiological_sigma [float]: Size of the variance of - physiological noise + + task_sigma [float]: Size of the variance of task specific noise + drift_sigma [float]: Size of the variance of drift noise + auto_reg_sigma [float]: Size of the variance of autoregressive noise + physiological_sigma [float]: Size of the variance of physiological + noise + auto_reg_rho [list]: The coefficients of the autoregressive components you are modeling ma_rho [list]:The coefficients of the moving average components you From 2cf25818ec99e54760552d7fec38ef58c6177913 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Mon, 23 Jul 2018 18:52:17 -0400 Subject: [PATCH 44/51] Changed the fitting procedure of _fit_spatial and _fit_temporal so that they are not made negative by default, added more description throughout --- brainiak/utils/fmrisim.py | 91 +++++++++++++--------- tests/utils/test_fmrisim.py | 151 ++++++++++++++++++++++++++---------- 2 files changed, 165 insertions(+), 77 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index baba7799d..1bc48f297 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -71,8 +71,9 @@ used by others (Welvaert & Rosseel, 2013) Authors: - Cameron Ellis (Princeton) 2016-2017 + Cameron Ellis (Princeton & Yale) 2016-2018 Chris Baldassano (Princeton) 2016-2017 + Mingbo Cai (Princeton) 2017 """ import logging @@ -2220,13 +2221,16 @@ def _noise_dict_update(noise_dict): to the brain. If you set the noise dict to matched then it will fit the parameters to match the participant as best as possible. The noise variables are as follows: - snr [float]: Size of the spatial noise - sfnr [float]: Size of the temporal noise. 
This is the total variability - that the following sigmas 'sum' to: + + snr [float]: Ratio of MR signal to the spatial noise + sfnr [float]: Ratio of the MR signal to the temporal noise. This is the + total variability that the following sigmas 'sum' to: task_sigma [float]: Size of the variance of task specific noise drift_sigma [float]: Size of the variance of drift noise - auto_reg_sigma [float]: Size of the variance of autoregressive noise + auto_reg_sigma [float]: Size of the variance of autoregressive + noise. This is an ARMA process where the AR and MA components can be + separately specified physiological_sigma [float]: Size of the variance of physiological noise @@ -2240,6 +2244,9 @@ def _noise_dict_update(noise_dict): fwhm [float]: The gaussian smoothing kernel size (mm) matched [bool]: Specify whether you are fitting the noise parameters + The volumes of brain noise that are generated have smoothness + specified by 'fwhm' + Returns ------- @@ -2380,7 +2387,7 @@ def _fit_spatial(noise, ) # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system + noise = base + (noise_temporal * temporal_sd) + noise_system # Reject negative values (only happens outside of the brain) noise[noise < 0] = 0 @@ -2389,7 +2396,7 @@ def _fit_spatial(noise, if iterations == 0: logger.info('No fitting iterations were run') elif iteration == iterations: - logger.info('SNR failed to converge.') + logger.warning('SNR failed to converge.') # Return the updated noise return noise, spatial_sd @@ -2531,13 +2538,13 @@ def _fit_temporal(noise, temp_sd_new = mean_signal / new_sfnr temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta) - # Set the new system noise - temp_sd_system_new = np.sqrt((temporal_sd ** 2) * temporal_proportion) - # Prevent these going out of range if temporal_sd < 0 or np.isnan(temporal_sd): temporal_sd = 10e-3 + # Set the new system noise + temp_sd_system_new = np.sqrt((temporal_sd ** 2) * temporal_proportion) + # Get the 
new AR value new_nd['auto_reg_rho'][0] -= (ar_diff * fit_delta) @@ -2561,7 +2568,7 @@ def _fit_temporal(noise, ) # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system + noise = base + (noise_temporal * temporal_sd) + noise_system # Reject negative values (only happens outside of the brain) noise[noise < 0] = 0 @@ -2570,7 +2577,7 @@ def _fit_temporal(noise, if iterations == 0: logger.info('No fitting iterations were run') elif iteration == iterations: - logger.info('AR failed to converge.') + logger.warning('AR failed to converge.') # Return the updated noise return noise @@ -2616,13 +2623,16 @@ def generate_noise(dimensions, This is a dictionary which describes the noise parameters of the data. If there are no other variables provided then it will use default values. The noise variables are as follows: - snr [float]: Size of the spatial noise - sfnr [float]: Size of the temporal noise. This is the total variability - that the following sigmas 'sum' to: + + snr [float]: Ratio of MR signal to the spatial noise + sfnr [float]: Ratio of the MR signal to the temporal noise. This is the + total variability that the following sigmas 'sum' to: task_sigma [float]: Size of the variance of task specific noise drift_sigma [float]: Size of the variance of drift noise - auto_reg_sigma [float]: Size of the variance of autoregressive noise + auto_reg_sigma [float]: Size of the variance of autoregressive + noise. 
This is an ARMA process where the AR and MA components can be + separately specified physiological_sigma [float]: Size of the variance of physiological noise @@ -2636,6 +2646,9 @@ def generate_noise(dimensions, fwhm [float]: The gaussian smoothing kernel size (mm) matched [bool]: Specify whether you are fitting the noise parameters + The volumes of brain noise that are generated have smoothness + specified by 'fwhm' + temporal_proportion, float What is the proportion of the temporal variance (as specified by the SFNR noise parameter) that is accounted for by the system noise. If @@ -2739,7 +2752,7 @@ def generate_noise(dimensions, ) # Sum up the noise of the brain - noise = base + (noise_temporal * (1 - temporal_sd)) + noise_system + noise = base + (noise_temporal * temporal_sd) + noise_system # Reject negative values (only happens outside of the brain) noise[noise < 0] = 0 @@ -2782,11 +2795,12 @@ def compute_signal_change(signal_function, magnitude, method='PSC', ): - """ Rescale the current a signal functions based on a metric and - magnitude supplied. Metrics are heavily influenced by Welvaert & Rosseel - (2013). The rescaling is based on the maximal activity in the - timecourse. Importantly, all values within the signal_function are - scaled to have a min of -1 or max of 1 + """ Rescale the signal to be a given magnitude, based on a specified + metric (e.g. percent signal change). Metrics are heavily inspired by + Welvaert & Rosseel (2013). The rescaling is based on the maximal + activity in the timecourse. Importantly, all values within the + signal_function are scaled to have a min of -1 or max of 1, meaning that + the voxel value will be the same as the magnitude. Parameters ---------- @@ -2797,7 +2811,12 @@ def compute_signal_change(signal_function, multiple time courses specified as different columns in this array. Conceivably you could use the output of generate_stimfunction as the input but the temporal variance - will be incorrect + will be incorrect. 
Critically, different values across voxels are + considered relative to each other, not independently. E.g., if the + voxel has a peak signal twice as high as another voxel's, then this + means that the signal after these transformations will still be + twice as high (according to the metric) in the first voxel relative + to the second noise_function : timepoint by voxel numpy array The time course of noise (a voxel created from generate_noise) @@ -2847,7 +2866,7 @@ def compute_signal_change(signal_function, # If you have only one magnitude value, duplicate the magnitude for each # timecourse you have if len(magnitude) == 1: - magnitude *= signal_function.shape[1] + magnitude = np.ones((signal_function.shape[1], 1)) * magnitude[0] # Scale all signals that to have a range of -1 to 1. This is # so that any values less than this will be scaled appropriately @@ -2862,6 +2881,10 @@ def compute_signal_change(signal_function, noise_voxel = noise_function[:, voxel_counter] magnitude_voxel = magnitude[voxel_counter] + # Calculate the maximum signal amplitude (likely to be 1, + # but not necessarily) + max_amp = np.max(np.abs(sig_voxel)) + # Calculate the scaled time course using the specified method if method == 'SFNR': @@ -2883,18 +2906,14 @@ def compute_signal_change(signal_function, elif method == 'CNR_Amp2/Noise-Var_dB': - # Calculate the current signal amplitude (likely to be 1, - # but not necessarily) - sig_amp = np.max(np.abs(sig_voxel)) - # What is the standard deviation of the noise noise_std = np.std(noise_voxel) # Rearrange the equation to compute the size of signal change in # decibels - scale = 10 ** ((magnitude_voxel / sig_amp) + np.log10(noise_std - ** 2)) - new_sig = sig_voxel * np.sqrt(scale) + scale = (10 ** (magnitude_voxel / 20)) * noise_std / max_amp + + new_sig = sig_voxel * scale elif method == 'CNR_Signal-SD/Noise-SD': @@ -2904,7 +2923,8 @@ def compute_signal_change(signal_function, # Multiply the signal timecourse by the the CNR and noise ( # 
rearranging eq.) - new_sig = sig_voxel * (magnitude_voxel * noise_std / sig_std) + new_sig = sig_voxel * ((magnitude_voxel / max_amp) * noise_std + / sig_std) elif method == 'CNR_Signal-Var/Noise-Var_dB': # What is the standard deviation of the signal and noise @@ -2913,9 +2933,10 @@ def compute_signal_change(signal_function, # Rearrange the equation to compute the size of signal change in # decibels - scale = 10 ** ((magnitude_voxel / sig_std) + np.log10(noise_std - ** 2)) - new_sig = sig_voxel * np.sqrt(scale) + scale = (10 ** (magnitude_voxel / 20)) * noise_std / (max_amp * + sig_std) + + new_sig = sig_voxel * scale elif method == 'PSC': diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 21466b658..30d9e75fa 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -245,58 +245,125 @@ def test_apply_signal(): iterations=[0, 0] ) coords = feature_coordinates[0] - noise_function = noise[coords[0], coords[1], coords[2], :] - noise_function = noise_function.reshape(duration // tr_duration, 1) + noise_function_a = noise[coords[0], coords[1], coords[2], :] + noise_function_a = noise_function_a.reshape(duration // tr_duration, 1) + + noise_function_b = noise[coords[0] + 1, coords[1], coords[2], :] + noise_function_b = noise_function_b.reshape(duration // tr_duration, 1) # Create the calibrated signal with PSC - magnitude = [0.5] + method = 'PSC' sig_a = sim.compute_signal_change(signal_function, - noise_function, + noise_function_a, noise_dict, - magnitude, - 'PSC', + [0.5], + method, ) - magnitude = [1] sig_b = sim.compute_signal_change(signal_function, - noise_function, + noise_function_a, noise_dict, - magnitude, - 'PSC', + [1.0], + method, ) - assert (abs(sig_b) - abs(sig_a)).min() >= 0, 'Magnitude modulation failed' + assert sig_b.max() / sig_a.max() == 2, 'PSC modulation failed' - # Check the other signal change metrics - sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'SFNR', 
- ) - sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Amp/Noise-SD', - ) - sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Amp2/Noise-Var_dB', - ) - sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Signal-SD/Noise-SD', - ) - sim.compute_signal_change(signal_function, - noise_function, - noise_dict, - magnitude, - 'CNR_Signal-Var/Noise-Var_dB', - ) + # Create the calibrated signal with SFNR + method = 'SFNR' + sig_a = sim.compute_signal_change(signal_function, + noise_function_a, + noise_dict, + [0.5], + method, + ) + scaled_a = sig_a / (noise_function_a.mean() / noise_dict['sfnr']) + sig_b = sim.compute_signal_change(signal_function, + noise_function_b, + noise_dict, + [1.0], + method, + ) + scaled_b = sig_b / (noise_function_b.mean() / noise_dict['sfnr']) + + assert scaled_b.max() / scaled_a.max() == 2, 'SFNR modulation failed' + + # Create the calibrated signal with CNR_Amp/Noise-SD + method = 'CNR_Amp/Noise-SD' + sig_a = sim.compute_signal_change(signal_function, + noise_function_a, + noise_dict, + [0.5], + method, + ) + scaled_a = sig_a / noise_function_a.std() + sig_b = sim.compute_signal_change(signal_function, + noise_function_b, + noise_dict, + [1.0], + method, + ) + scaled_b = sig_b / noise_function_b.std() + + assert scaled_b.max() / scaled_a.max() == 2, 'CNR_Amp modulation failed' + + # Create the calibrated signal with CNR_Amp/Noise-Var_dB + method = 'CNR_Amp2/Noise-Var_dB' + sig_a = sim.compute_signal_change(signal_function, + noise_function_a, + noise_dict, + [0.5], + method, + ) + scaled_a = np.log(sig_a.max() / noise_function_a.std()) + sig_b = sim.compute_signal_change(signal_function, + noise_function_b, + noise_dict, + [1.0], + method, + ) + scaled_b = np.log(sig_b.max() / noise_function_b.std()) + + assert np.round(scaled_b / scaled_a) == 2, 'CNR_Amp dB modulation failed' + + # Create the calibrated signal 
with CNR_Signal-SD/Noise-SD + method = 'CNR_Signal-SD/Noise-SD' + sig_a = sim.compute_signal_change(signal_function, + noise_function_a, + noise_dict, + [0.5], + method, + ) + scaled_a = sig_a.std() / noise_function_a.std() + sig_b = sim.compute_signal_change(signal_function, + noise_function_a, + noise_dict, + [1.0], + method, + ) + scaled_b = sig_b.std() / noise_function_a.std() + + assert (scaled_b / scaled_a) == 2, 'CNR signal modulation failed' + + # Create the calibrated signal with CNR_Amp/Noise-Var_dB + method = 'CNR_Signal-Var/Noise-Var_dB' + sig_a = sim.compute_signal_change(signal_function, + noise_function_a, + noise_dict, + [0.5], + method, + ) + + scaled_a = np.log(sig_a.std() / noise_function_a.std()) + sig_b = sim.compute_signal_change(signal_function, + noise_function_b, + noise_dict, + [1.0], + method, + ) + scaled_b = np.log(sig_b.std() / noise_function_b.std()) + + assert np.round(scaled_b / scaled_a) == 2, 'CNR signal dB modulation ' \ + 'failed' # Convolve the HRF with the stimulus sequence signal = sim.apply_signal(signal_function=signal_function, From f9c7ff1f849400baeddcb4e9a3ddeb9ca592a718 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Fri, 10 Aug 2018 14:14:21 -0400 Subject: [PATCH 45/51] Corrected error in generating system noise. 
Specifically spatial noise was being double counted --- brainiak/utils/fmrisim.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 1bc48f297..70c05f146 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1471,12 +1471,12 @@ def noise_volume(dimensions, temporal_noise *= temporal_sd # The mean in time of system noise needs to be zero, so subtract the - # means of the temporal noise in time and spatial noise + # means of the temporal noise in time temporal_noise_mean = np.mean(temporal_noise, 3).reshape(dimensions[0], dimensions[1], dimensions[2], 1) - temporal_noise = temporal_noise - (temporal_noise_mean - spatial_noise) + temporal_noise = temporal_noise - temporal_noise_mean # Save the combination system_noise = spatial_noise + temporal_noise @@ -2866,7 +2866,7 @@ def compute_signal_change(signal_function, # If you have only one magnitude value, duplicate the magnitude for each # timecourse you have if len(magnitude) == 1: - magnitude = np.ones((signal_function.shape[1], 1)) * magnitude[0] + magnitude *= signal_function.shape[1] # Scale all signals that to have a range of -1 to 1. 
This is # so that any values less than this will be scaled appropriately From ac1183bc6327920aa9e9c66fdcce1e970d3e0013 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Sun, 12 Aug 2018 22:49:30 -0400 Subject: [PATCH 46/51] Added documentation to clarify how nonbrain voxels affect SNR, how SNR is calculated and the importance of using unmasked data for generating templates --- brainiak/utils/fmrisim.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 70c05f146..32b875a94 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1126,7 +1126,7 @@ def _calc_sfnr(volume, def _calc_snr(volume, mask, dilation=5, - tr=None, + reference_tr=None, ): """ Calculate the the SNR of a volume Calculates the Signal to Noise Ratio, the mean of brain voxels @@ -1152,8 +1152,9 @@ def _calc_snr(volume, increases and the non-brain voxels (after baseline subtraction) more closely resemble a gaussian - tr : int - Integer specifying TR to calculate the SNR for + reference_tr : int or list + Specifies the TR to calculate the SNR for. If multiple are supplied + then it will use the average of them. 
Returns ------- @@ -1164,8 +1165,8 @@ def _calc_snr(volume, """ # If no TR is specified then take all of them - if tr is None: - tr = list(range(volume.shape[3])) + if reference_tr is None: + reference_tr = list(range(volume.shape[3])) # Dilate the mask in order to ensure that non-brain voxels are far from # the brain @@ -1176,8 +1177,8 @@ def _calc_snr(volume, mask_dilated = mask # Make a matrix of brain and non_brain voxels, selecting the timepoint/s - brain_voxels = volume[mask > 0][:, tr] - nonbrain_voxels = (volume[:, :, :, tr]).astype('float64') + brain_voxels = volume[mask > 0][:, reference_tr] + nonbrain_voxels = (volume[:, :, :, reference_tr]).astype('float64') # If you have multiple TRs if len(brain_voxels.shape) > 1: @@ -2079,7 +2080,10 @@ def mask_brain(volume, """ Mask the simulated volume This creates a mask specifying the approximate likelihood that a voxel is part of the brain. All values are bounded to the range of 0 to 1. An - appropriate threshold to isolate brain voxels is >0.2 + appropriate threshold to isolate brain voxels is >0.2. Critically, + the data that should be used to create a template shouldn't already be + masked/skull stripped. If it is then it will give in accurate estimates + of non-brain noise and corrupt estimations of SNR. 
Parameters ---------- From 7b8bf1139ccbedd7a85b675ea460ec53b2bfb215 Mon Sep 17 00:00:00 2001 From: CameronTEllis Date: Sat, 18 Aug 2018 12:05:47 -0400 Subject: [PATCH 47/51] Removed the balancing of spatial_sd based on temporal_sd --- brainiak/utils/fmrisim.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 32b875a94..3a84ef96b 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -1457,14 +1457,6 @@ def noise_volume(dimensions, spatial_noise = noise_volume(dimensions, spatial_noise_type) temporal_noise = noise_volume(dimensions_tr, temporal_noise_type) - # Since you are combining spatial and temporal noise, you need to - # subtract the variance of the two to get the spatial sd - if spatial_sd > temporal_sd: - spatial_sd = np.sqrt(spatial_sd ** 2 - temporal_sd ** 2) - else: - # If this is below zero then all the noise will be temporal - spatial_sd = 0 - # Make the system noise have a specific spatial variability spatial_noise *= spatial_sd From 9e41edc60ec93a7e44f06d3aee0b2a01d5053b09 Mon Sep 17 00:00:00 2001 From: Mingbo Cai Date: Sat, 18 Aug 2018 23:26:17 -0400 Subject: [PATCH 48/51] Update test_fmrisim.py Changed the argument `tr` to `reference_tr` for `_calc_snr` in the test code. 
--- tests/utils/test_fmrisim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils/test_fmrisim.py b/tests/utils/test_fmrisim.py index 30d9e75fa..e7254dc23 100644 --- a/tests/utils/test_fmrisim.py +++ b/tests/utils/test_fmrisim.py @@ -707,7 +707,7 @@ def test_calc_noise(): no_dilation_snr = sim._calc_snr(noise_matched, mask, dilation=0, - tr=tr_duration, + reference_tr=tr_duration, ) assert nd_new['snr'] > no_dilation_snr, "Dilation did not increase SNR" From d6e3959612c438753c92c36c599b13096435d08c Mon Sep 17 00:00:00 2001 From: Mingbo Cai Date: Sun, 2 Sep 2018 09:09:51 -0400 Subject: [PATCH 49/51] adding a check that `magnitude` is indeed a list @CameronTEllis I added a line in 2864 of fmrisim.py to check for the type of `magnitude` (in case a numpy array of size 1 is fed). Please check if this is appropriate. --- brainiak/utils/fmrisim.py | 1 + 1 file changed, 1 insertion(+) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 3a84ef96b..77b812f05 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2861,6 +2861,7 @@ def compute_signal_change(signal_function, # If you have only one magnitude value, duplicate the magnitude for each # timecourse you have + assert type(magniutde) is list, 'magnitude should be a list of floats' if len(magnitude) == 1: magnitude *= signal_function.shape[1] From 657d54d8c1fe800ff317aaeee206c91081955fc2 Mon Sep 17 00:00:00 2001 From: Mingbo Cai Date: Sun, 2 Sep 2018 09:27:15 -0400 Subject: [PATCH 50/51] Update fmrisim.py corrected a typo --- brainiak/utils/fmrisim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 77b812f05..799f69937 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2861,7 +2861,7 @@ def compute_signal_change(signal_function, # If you have only one magnitude value, duplicate the magnitude for each # timecourse you have - assert type(magniutde) is 
list, 'magnitude should be a list of floats' + assert type(magnitude) is list, 'magnitude should be a list of floats' if len(magnitude) == 1: magnitude *= signal_function.shape[1] From 276bdfd6f0b718ed1408c7b7ded8d9d2e01a9f4f Mon Sep 17 00:00:00 2001 From: Mingbo Cai Date: Sun, 2 Sep 2018 15:59:17 -0400 Subject: [PATCH 51/51] Update fmrisim.py Not sure why two checks were pending. Just made small changes to force it to retest. --- brainiak/utils/fmrisim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brainiak/utils/fmrisim.py b/brainiak/utils/fmrisim.py index 799f69937..41c5d73dd 100644 --- a/brainiak/utils/fmrisim.py +++ b/brainiak/utils/fmrisim.py @@ -2861,7 +2861,7 @@ def compute_signal_change(signal_function, # If you have only one magnitude value, duplicate the magnitude for each # timecourse you have - assert type(magnitude) is list, 'magnitude should be a list of floats' + assert type(magnitude) is list, '"magnitude" should be a list of floats' if len(magnitude) == 1: magnitude *= signal_function.shape[1]