
Merge pull request #16 from racheldenison/master

Adding Rachel code folder to users
commit 121c97c15feb432ef1eb8d21b6faba0c8254cc42 2 parents 20f85d7 + f6f77a1
racheldenison authored
Showing with 14,760 additions and 0 deletions.
  1. +1 −0  silverlab_vista_tools/python_vista_utils/build/lib/vista_utils/__init__.py
  2. +40 −0 silverlab_vista_tools/python_vista_utils/build/lib/vista_utils/version.py
  3. +382 −0 silverlab_vista_tools/python_vista_utils/build/lib/vista_utils/vista_utils.py
  4. +150 −0 users/Rachel/LGN_Code/R_analysis/.Rhistory
  5. BIN  users/Rachel/LGN_Code/R_analysis/Protocol1_tseries_workspace_20101205.RData
  6. BIN  users/Rachel/LGN_Code/R_analysis/Protocol2-mostly-tseries_workspace_20101205.RData
  7. +98 −0 users/Rachel/LGN_Code/R_analysis/lgn_exploratory.R
  8. +80 −0 users/Rachel/LGN_Code/R_analysis/lgn_sineRegression.R
  9. +75 −0 users/Rachel/LGN_Code/R_analysis/lgn_sineRegressionAnalysis.R
  10. +105 −0 users/Rachel/LGN_Code/R_analysis/lgn_sineRegressionAnalysis_v2.R
  11. +36 −0 users/Rachel/LGN_Code/R_analysis/rd_tSideFSideComparison.m
  12. +32 −0 users/Rachel/LGN_Code/ang2pix.m
  13. BIN  users/Rachel/LGN_Code/bicolorMap.mat
  14. +52 −0 users/Rachel/LGN_Code/cg_qualityControl.m
  15. +279 −0 users/Rachel/LGN_Code/coherence_analysis/coherence_fmri_only_rd.m
  16. +413 −0 users/Rachel/LGN_Code/coherence_analysis/coherence_tutorial_solution_full_fmri.m
  17. +470 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/entries
  18. +87 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/GaussianSpectrum.m.svn-base
  19. +45 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/check_fields.m.svn-base
  20. +66 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/compute_srdata_means.m.svn-base
  21. +29 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/make_psth.m.svn-base
  22. +55 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/make_tfrep.m.svn-base
  23. +79 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/plot_tf_resp.m.svn-base
  24. +50 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/plot_tfrep.m.svn-base
  25. +20 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/preprocess_response.m.svn-base
  26. +172 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/preprocess_sound.m.svn-base
  27. +8 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/read_spikes_from_file.m.svn-base
  28. +11 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/rv.m.svn-base
  29. +25 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/spec_cmap.m.svn-base
  30. +108 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/text-base/timefreq.m.svn-base
  31. +87 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/GaussianSpectrum.m
  32. +45 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/check_fields.m
  33. +66 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/compute_srdata_means.m
  34. +29 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/make_psth.m
  35. +55 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/make_tfrep.m
  36. +79 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/plot_tf_resp.asv
  37. +79 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/plot_tf_resp.m
  38. +50 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/plot_tfrep.m
  39. +20 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/preprocess_response.m
  40. +172 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/preprocess_sound.m
  41. +8 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/read_spikes_from_file.m
  42. +11 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/rv.m
  43. +25 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/spec_cmap.m
  44. +108 −0 users/Rachel/LGN_Code/coherence_analysis/preprocessing/timefreq.m
  45. +79 −0 users/Rachel/LGN_Code/coherence_analysis/rd_plotVoxSNR.m
  46. +129 −0 users/Rachel/LGN_Code/coherence_analysis/rd_runVoxCoherence.m
  47. +266 −0 users/Rachel/LGN_Code/coherence_analysis/rd_voxCoherence.m
  48. +253 −0 users/Rachel/LGN_Code/coherence_analysis/simulation_code_20101110/coherence_simulation.m
  49. +73 −0 users/Rachel/LGN_Code/coherence_analysis/simulation_code_20101110/coherence_simulation_plots.m
  50. +56 −0 users/Rachel/LGN_Code/coherence_analysis/simulation_code_20101110/run_coherence_simulation.m
  51. +334 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/entries
  52. +31 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/compute_coherence_bound.m.svn-base
  53. +71 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/compute_coherence_dataset.m.svn-base
  54. +73 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/compute_coherence_full.m.svn-base
  55. +107 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/compute_coherence_mean.m.svn-base
  56. +31 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/compute_psth_halves.m.svn-base
  57. +200 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/df_mtchd_JN.m.svn-base
  58. +78 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/df_mtparam.m.svn-base
  59. +69 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/find_datasets.m.svn-base
  60. +40 −0 users/Rachel/LGN_Code/coherence_analysis/validation/.svn/text-base/get_filenames.m.svn-base
  61. +31 −0 users/Rachel/LGN_Code/coherence_analysis/validation/compute_coherence_bound.m
  62. +71 −0 users/Rachel/LGN_Code/coherence_analysis/validation/compute_coherence_dataset.m
  63. +73 −0 users/Rachel/LGN_Code/coherence_analysis/validation/compute_coherence_full.m
  64. +108 −0 users/Rachel/LGN_Code/coherence_analysis/validation/compute_coherence_mean.asv
  65. +107 −0 users/Rachel/LGN_Code/coherence_analysis/validation/compute_coherence_mean.m
  66. +31 −0 users/Rachel/LGN_Code/coherence_analysis/validation/compute_psth_halves.m
  67. +200 −0 users/Rachel/LGN_Code/coherence_analysis/validation/df_mtchd_JN.m
  68. +78 −0 users/Rachel/LGN_Code/coherence_analysis/validation/df_mtparam.m
  69. +69 −0 users/Rachel/LGN_Code/coherence_analysis/validation/find_datasets.m
  70. +40 −0 users/Rachel/LGN_Code/coherence_analysis/validation/get_filenames.m
  71. +7 −0 users/Rachel/LGN_Code/img2mat.m
  72. +60 −0 users/Rachel/LGN_Code/mrCallbackSteps.m
  73. +276 −0 users/Rachel/LGN_Code/old_safe_files/rd_mpDistribution_20110819.m
  74. +32 −0 users/Rachel/LGN_Code/pix2ang.m
  75. +84 −0 users/Rachel/LGN_Code/preproc/dicom2vista_org.py
  76. +78 −0 users/Rachel/LGN_Code/preproc/dicom2vista_pd.py
  77. +78 −0 users/Rachel/LGN_Code/preproc/dicom2vista_t1.py
  78. +84 −0 users/Rachel/LGN_Code/preproc/motioncorrect.py
  79. +56 −0 users/Rachel/LGN_Code/preproc/motioncorrect_pd.py
  80. +78 −0 users/Rachel/LGN_Code/preproc/motionparams.py
  81. +112 −0 users/Rachel/LGN_Code/preproc/previous/dicom2vista_SS.py
  82. +80 −0 users/Rachel/LGN_Code/preproc/previous/dicom2vista_graphonly.py
  83. +114 −0 users/Rachel/LGN_Code/preproc/previous/dicom2vista_rd.py
  84. +35 −0 users/Rachel/LGN_Code/preproc/spmDirectorySetup.py
  85. +144 −0 users/Rachel/LGN_Code/python_analysis/movie_analysis1.py
  86. +203 −0 users/Rachel/LGN_Code/python_analysis/subjects.py
  87. +9 −0 users/Rachel/LGN_Code/rd_adjustMrSesssion_PD.m
  88. +5 −0 users/Rachel/LGN_Code/rd_calculateCost.m
  89. +5 −0 users/Rachel/LGN_Code/rd_calculateCost2.m
  90. +31 −0 users/Rachel/LGN_Code/rd_centerOfMass.m
  91. +92 −0 users/Rachel/LGN_Code/rd_centerOfMass_multiVoxData.m
  92. +82 −0 users/Rachel/LGN_Code/rd_fTest.m
  93. +74 −0 users/Rachel/LGN_Code/rd_fTestBlock.m
  94. +62 −0 users/Rachel/LGN_Code/rd_fTestBlockGLM.m
  95. +108 −0 users/Rachel/LGN_Code/rd_fTestGLM.m
  96. +55 −0 users/Rachel/LGN_Code/rd_fTestTSeries.m
  97. +57 −0 users/Rachel/LGN_Code/rd_findCentersOfMass.m
  98. +52 −0 users/Rachel/LGN_Code/rd_findDuplicateROICoords.m
  99. +101 −0 users/Rachel/LGN_Code/rd_getTSeriesROI.m
  100. +107 −0 users/Rachel/LGN_Code/rd_lgnROIPipeline_20101013.m
  101. +145 −0 users/Rachel/LGN_Code/rd_lgnROIPipeline_20101028.m
  102. +165 −0 users/Rachel/LGN_Code/rd_lgnROIPipeline_20101118.m
  103. +101 −0 users/Rachel/LGN_Code/rd_lgnROIPipeline_mpLocalizer_20110127.m
  104. +110 −0 users/Rachel/LGN_Code/rd_lgnROIPipeline_mpLocalizer_20110818.m
  105. +128 −0 users/Rachel/LGN_Code/rd_lgnROIPipeline_mpLocalizer_20110819.m
  106. +8 −0 users/Rachel/LGN_Code/rd_loadPValsAsTopoData.m
  107. +96 −0 users/Rachel/LGN_Code/rd_makeConcatScanMovie.m
  108. +61 −0 users/Rachel/LGN_Code/rd_makeMeanEpi.m
  109. +122 −0 users/Rachel/LGN_Code/rd_meanTSeries.m
  110. +304 −0 users/Rachel/LGN_Code/rd_mpDistributionCorAnal.m
  111. +290 −0 users/Rachel/LGN_Code/rd_mpDistributionCorAnalOrtho.m
  112. +149 −0 users/Rachel/LGN_Code/rd_mpDistributionCorAnalSingle.m
  113. +269 −0 users/Rachel/LGN_Code/rd_mpDistributionZ.m
  114. +159 −0 users/Rachel/LGN_Code/rd_mpMetric.m
  115. +166 −0 users/Rachel/LGN_Code/rd_mpMetricSearch.m
  116. +41 −0 users/Rachel/LGN_Code/rd_mpPCA.m
  117. +14 −0 users/Rachel/LGN_Code/rd_mrAdjustDataTypesWC20110901.m
  118. +33 −0 users/Rachel/LGN_Code/rd_mrGLMReliability.m
  119. +88 −0 users/Rachel/LGN_Code/rd_mrMakeDesign_MPLocalizerColor.m
  120. +84 −0 users/Rachel/LGN_Code/rd_mrMakeDesign_MPLocalizerGLM.m
  121. +113 −0 users/Rachel/LGN_Code/rd_mrMakeDesign_MPLocalizerGLM_CondMapping.m
  122. +7 −0 users/Rachel/LGN_Code/rd_mrMakeMeanEpiAnat.m
  123. +150 −0 users/Rachel/LGN_Code/rd_mrMakeMrInit2Params.m
  124. +141 −0 users/Rachel/LGN_Code/rd_mrMakeMrInit2Params_OLD.m
  125. +61 −0 users/Rachel/LGN_Code/rd_mrMakeMrSESSIONFromSingleScanSESSION.m
  126. +18 −0 users/Rachel/LGN_Code/rd_plotData3D.m
  127. +30 −0 users/Rachel/LGN_Code/rd_plotDetrendedTSeries.m
  128. +145 −0 users/Rachel/LGN_Code/rd_plotOnBrain.m
  129. +204 −0 users/Rachel/LGN_Code/rd_plotOnBrainROIData.m
  130. +222 −0 users/Rachel/LGN_Code/rd_plotOnBrainROIData2.m
  131. +192 −0 users/Rachel/LGN_Code/rd_plotOnBrainZContrast.m
  132. +36 −0 users/Rachel/LGN_Code/rd_plotROISeries.m
  133. +16 −0 users/Rachel/LGN_Code/rd_plotSlices.m
  134. +21 −0 users/Rachel/LGN_Code/rd_plotTSeries.m
  135. +249 −0 users/Rachel/LGN_Code/rd_plotTopographicData.m
  136. +254 −0 users/Rachel/LGN_Code/rd_plotTopographicData2.m
  137. +284 −0 users/Rachel/LGN_Code/rd_plotTopographicData2Fn.m
  138. +261 −0 users/Rachel/LGN_Code/rd_plotTopographicData2SatFn.m
  139. +91 −0 users/Rachel/LGN_Code/rd_plotVoxelFFT.m
  140. +43 −0 users/Rachel/LGN_Code/rd_quickPlotBetaMaps.m
  141. +59 −0 users/Rachel/LGN_Code/rd_quickPlotBetaMapsSat.m
  142. +17 −0 users/Rachel/LGN_Code/rd_quickPlotMetricSearchTopo.m
  143. +25 −0 users/Rachel/LGN_Code/rd_quickPlotTopoHist.m
  144. +71 −0 users/Rachel/LGN_Code/rd_readRawDataMultiScan.m
  145. +38 −0 users/Rachel/LGN_Code/rd_roiInplaneCompare.m
  146. +58 −0 users/Rachel/LGN_Code/rd_showSideBySideSliceMovie.m
  147. +97 −0 users/Rachel/LGN_Code/rd_showSliceMovie.m
  148. +42 −0 users/Rachel/LGN_Code/rd_spmDiagnostics.m
  149. +6 −0 users/Rachel/LGN_Code/rd_spmGetValsFromImgAtCoords.m
  150. +39 −0 users/Rachel/LGN_Code/rd_spmMakeDesign_HemiBurst.m
  151. +16 −0 users/Rachel/LGN_Code/rd_spmMakeDesign_HemifieldMapping.m
  152. +33 −0 users/Rachel/LGN_Code/rd_spmMakeDesign_MPLocalizerGLM.m
  153. +41 −0 users/Rachel/LGN_Code/rd_spmROIDataFromImg.m
  154. +14 −0 users/Rachel/LGN_Code/rd_spmROITimecourse.m
  155. +41 −0 users/Rachel/LGN_Code/rd_voxelTSeries.m
  156. +22 −0 users/Rachel/LGN_Code/rd_voxsInGroupForTopo.m
  157. +16 −0 users/Rachel/LGN_Code/rd_zCombo.m
  158. +15 −0 users/Rachel/LGN_Code/rd_zDistanceCost.m
  159. +15 −0 users/Rachel/LGN_Code/rd_zDistanceCost2.m
  160. +75 −0 users/Rachel/LGN_Code/roi2voxruns.m
  161. +88 −0 users/Rachel/LGN_Code/roi2voxruns_combineVoxels.m
1  silverlab_vista_tools/python_vista_utils/build/lib/vista_utils/__init__.py
@@ -0,0 +1 @@
+from vista_utils import *
40 silverlab_vista_tools/python_vista_utils/build/lib/vista_utils/version.py
@@ -0,0 +1,40 @@
+"""nitime version/release information"""
+
+ISRELEASED = False
+
+# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
+_version_major = 0
+_version_minor = 1
+_version_micro = 'dev'
+__version__ = "%s.%s.%s" % (_version_major, _version_minor, _version_micro)
+
+
+## CLASSIFIERS = ["Development Status :: 3 - Alpha",
+## "Environment :: Console",
+## "Intended Audience :: Science/Research",
+## "License :: OSI Approved :: BSD License",
+## "Operating System :: OS Independent",
+## "Programming Language :: Python",
+## "Topic :: Scientific/Engineering"]
+
+description = "vista_utils: utilities for interfacing with data from mrVista"
+
+# Note: this long_description is actually a copy/paste from the top-level
+# README.txt, so that it shows up nicely on PyPI. So please remember to edit
+# it only in one place and sync it correctly.
+long_description = """
+"""
+
+NAME = "vista_utils"
+MAINTAINER = "Ariel Rokem"
+MAINTAINER_EMAIL = "arokem@gmail.com"
+DESCRIPTION = description
+LONG_DESCRIPTION = long_description
+AUTHOR = "Ariel Rokem"
+AUTHOR_EMAIL = "arokem@gmail.com"
+MAJOR = _version_major
+MINOR = _version_minor
+MICRO = _version_micro
+VERSION = __version__
+PACKAGES = ['vista_utils']
+REQUIRES = ["numpy", "matplotlib", "scipy"]
382 silverlab_vista_tools/python_vista_utils/build/lib/vista_utils/vista_utils.py
@@ -0,0 +1,382 @@
+#-----------------------------------------------------------------------------
+# mrVista utils
+# For the analysis of data created by the mrVista package
+#-----------------------------------------------------------------------------
+"""These utilities can be used for extracting and processing fMRI data analyzed
+using the Matlab toolbox mrVista (http://white.stanford.edu/mrvista)
+"""
+import numpy as np
+import scipy.io as sio
+
+import nitime.timeseries as ts
+import nitime.utils as tsu
+import nitime.analysis as ta
+
+from nitime.fmri.io import time_series_from_file as load_nii
+
+__all__ = ['getROIcoords',
+ 'get_time_series_inplane',
+ 'detrend_tseries',
+ 'filter_coords',
+ 'upsample_coords',
+ 'vector_mean',
+ 'get_flat_ts']
+
+##---- getROIcoords: -----------------------------------------------
+def getROIcoords(ROI_file):
+ """Get the ROI coordinates for a given ROI and scan in the Gray
+
+ Parameters
+ ----------
+
+ ROI_file : string, full path to the ROI file
+
+ Output
+ ------
+
+ coords: int array. The x,y,z coordinates of the ROI.
+
+ Notes
+ -----
+ The order of x,y and z in the output may be slightly idiosyncratic and
+ depends on the data type in question
+
+ """
+
+ ROI_mat_file = sio.loadmat(ROI_file,squeeze_me=True,struct_as_record=False)
+
+ return ROI_mat_file['ROI'].coords
+
+
+##---- getTseries: -----------------------------------------------
+def get_time_series_inplane(coords,scan_file,
+ f_c=0.01,up_sample_factor=[1,1,1],
+ detrend=True,normalize=True,average=True,
+ TR=None):
+
+ """vista_get_time_series: Acquire a time series for a particular scan/ROI.
+
+ Parameters
+ ----------
+ coords: a list of arrays
+ each array holds the X,Y,Z locations of an ROI
+ (as represented in the Inplane)
+
+ scan_file: string, full path to the analyze file of the scan
+
+ TR: float the repetition time in the experiment
+
+ up_sample_factor: float
+ the ratio between the size of the inplane and the size of the gray
+ (taking into account FOV and number of voxels in each
+ dimension). Defaults to [1,1,1] - no difference
+
+ detrend: bool, optional
+ whether to detrend the signal. Default to 'True'
+
+ normalize: bool, optional
+ whether to transform the signal into % signal change. Default to 'True'
+
+ average: bool, optional
+ whether to average the resulting signal
+
+ Returns
+ -------
+ time_series: array, the resulting time_series
+ Depending on the averaging flag, can have the dimensions 1*time-points or
+ number-voxels*time-points.
+
+ Notes
+ -----
+
+ The order of the operations on the time-series is:
+ detrend(on a voxel-by-voxel basis) => normalize (on a voxel-by-voxel basis)
+ => average (across voxels, on a time-point-by-time-point basis)
+
+ """
+ from nibabel import load
+
+ #Get the nifti image object
+
+ print 'Reading data from %s' %scan_file
+ data = load(scan_file).get_data() #if using nipy.io.imageformats.load
+
+ #Adjusted the coordinates according to the ratio between the
+ #sampling in the gray and the sampling in the inplane, move the
+ #slice dimension to be the first one and change the indexing from
+ #1-based to 0-based. The coord order is as it is in the input, so need to
+ #make sure that it is correct on the input side.
+
+ this_data = data[np.round(coords[0]/up_sample_factor[0]).astype(int)-1,
+ np.round(coords[1]/up_sample_factor[1]).astype(int)-1,
+ np.round(coords[2]/up_sample_factor[2]).astype(int)-1]
+
+ if normalize:
+ this_data = tsu.percent_change(this_data)
+
+ if average:
+ this_data = np.mean(this_data,0)
+
+ time_series = ts.TimeSeries(data=this_data,sampling_interval=TR)
+
+ if detrend:
+ F = ta.FilterAnalyzer(time_series, lb=f_c)
+ time_series = F.filtered_boxcar
+
+ return time_series
+
+#---detrend_tseries--------------------------------------------------------------
+def detrend_tseries(time_series,TR,f_c,n_iterations=2):
+ """ vista_detrend_tseries: detrending a-la DBR&DJH. A low-passed version is
+ created by convolving with a box-car and then the low-passed version is
+ subtracted from the signal, resulting in a high-passed version
+
+ Parameters
+ ----------
+
+ time_series: float array
+ the signal
+
+ TR: float
+ the sampling interval (inverse of the sampling rate)
+
+ f_c: float
+ the cut-off frequency for the high-/low-pass filtering. Default to 0.01 Hz
+
+ n_iterations: int, optional
+ how many rounds of smoothing to do (defaults to 2, based on DBR&DJH)
+
+ Returns
+ -------
+ float array: the signal, filtered
+ """
+ #Box-car filter
+ box_car = np.ones(np.ceil(1.0/(f_c/TR)))
+ box_car = box_car/(float(len(box_car)))
+ box_car_ones = np.ones(len(box_car))
+
+ #Input can be 1-d (for a single time-series), or 2-d (for a stack of
+ #time-series). Only in the latter case do we want to iterate over the
+ #length of time_series:
+
+ if len(time_series.shape) > 1:
+ for i in xrange(time_series.shape[0]):
+
+ #Detrending: Start by applying a low-pass to the signal.
+ #Pad the signal on each side with the initial and terminal
+ #signal value:
+
+ pad_s = np.append(box_car_ones * time_series[i][0],
+ time_series[i][:])
+ pad_s = np.append(pad_s, box_car_ones * time_series[i][-1])
+
+ #Filter operation is a convolution with the box-car(iterate,
+ #n_iterations times over this operation):
+ for it in xrange(n_iterations):
+ conv_s = np.convolve(pad_s,box_car)
+
+ #Extract the low pass signal by excising the central
+ #len(time_series) points:
+ #s_lp = conv_s[len(box_car):-1*len(box_car)]
+
+ #does the same as this?
+
+ s_lp= (conv_s[len(conv_s)/2-np.ceil(len(time_series[i][:])/2.0):
+ len(conv_s)/2+len(time_series[i][:])/2]) #ceil(/2.0)
+ #for cases where the time_series has an odd number of points
+
+ #Extract the high pass signal simply by subtracting the high pass
+ #signal from the original signal:
+
+ time_series[i] = time_series[i][:] - s_lp + np.mean(s_lp) #add mean
+ #to make sure that there are no negative values. This also seems to
+ #make sure that the mean of the signal (in % signal change) is close
+ #to 0
+
+
+ else: #Same exact thing, but with one less index:
+ pad_s = np.append(box_car_ones * time_series[0],time_series[:])
+ pad_s = np.append(pad_s, box_car_ones * time_series[-1])
+ for i in xrange(n_iterations):
+ conv_s = np.convolve(pad_s,box_car)
+ s_lp= (conv_s[len(conv_s)/2-np.ceil(len(time_series[:])/2.0):
+ len(conv_s)/2+len(time_series[:])/2])
+ time_series = time_series[:] - s_lp + np.mean(s_lp)
+
+
+ #Handle memory:
+ time_series_out = np.copy(time_series)
+
+ return time_series_out
+
+##---- vista_filter_coords: -----------------------------------------------
+
+def filter_coords(coords,filt,filt_thresh,up_sample_factor):
+
+ """Filter the coords in an ROI, by the value in some other image (for
+ example, the coherence in each of the voxels in the ROI)
+
+ Params
+ ------
+ filt: an array with the values to filter on
+
+ coords: the set of coordinates to filter
+
+ filt_thresh: only coordinates with filt > filt_thresh will be kept
+
+ Returns
+ -------
+ coords_out: array
+ a new set of coords, in the same space as the input
+
+ """
+ coords_temp = np.where(filt>filt_thresh)
+ coords_filt = np.vstack([coords_temp[0],coords_temp[1],coords_temp[2]])
+
+ newCoords = np.empty(coords.shape,dtype='int')
+ newCoords[0,:] = coords[0,:] / up_sample_factor[0] - 1 #Inplane
+ newCoords[1,:] = coords[1,:] / up_sample_factor[1] - 1 #Inplane
+ newCoords[2,:] = coords[2,:] / up_sample_factor[2] - 1 #Slices
+
+ coords_out = tsu.intersect_coords(newCoords,coords_filt)
+
+ return coords_out
+def upsample_coords(coords,up_sample_factor):
+ """up-sample coords from the gray resolution into the Inplane resolution,
+ from a set of input coords, given in the order [Inplane,Inplane,Slices] and
+ the up_sample_factor in each of these dimensions.
+
+ Takes into account the fact that the coords are off by one, due to Matlab's
+ 1-based indexing...
+
+ """
+ newCoords = np.empty(coords.shape,dtype=int)
+ #Inplane:
+ newCoords[0,:] = np.round(coords[0,:] / up_sample_factor[0] - 1).astype(int)
+ #Inplane:
+ newCoords[1,:] = np.round(coords[1,:] / up_sample_factor[1] - 1).astype(int)
+ #Slices:
+ newCoords[2,:] = np.round(coords[2,:] / up_sample_factor[2] - 1).astype(int)
+
+ return newCoords
+
+def vector_mean(coranal,scan_num,coords,upsamp,n_cycles):
+ """
+ Given an mrVista coranal (read in with sio.loadmat, squeeze_me=True,
+ struct_as_record=False), a scan number, coords into the arrays in the
+ coranal (and the appropriate upsampling factor) produce back the mean
+ amplitude, the mean phase and the meanStd for that scan as is done in:
+
+ mrLoadRet/Analysis/BlockAnalysis/vectorMean.m
+ """
+
+ coords= upsample_coords(coords,upsamp)
+
+ ph = coranal['ph'][scan_num][coords]
+ amp = coranal['amp'][scan_num][coords]
+ co = coranal['co'][scan_num][coords]
+
+ #This is simple:
+ mean_co = np.mean(co)
+
+ #This is a bit more complicated:
+ i = np.complex(0,1)
+ z = amp * np.exp(i*ph)
+
+ meanZ = np.mean(z)
+
+ mean_amp = np.abs(meanZ)
+ mean_ph = np.angle(meanZ)
+
+ #Compute the standard error of the complex quantity:
+ se_z = np.std(z)/np.sqrt(len(z))
+
+ #And compute another measure of error based on the coherence and amplitude:
+ mean_std = mean_amp * np.sqrt(((1/np.mean(co)**2))-1)
+
+ return mean_amp,mean_ph,se_z,mean_std/n_cycles
+
+
+def get_flat_ts(flat_dir,nii_file,mr_session,TR,up_samp=[1,1,1],
+ normalize='zscore',lb=0,ub=None):
+
+ """
+
+ Returns the flattened time-dependent data from a nifti file
+
+
+ Parameters
+ ----------
+ flat_dir: str
+ The full path to the flat directory containing the information for this
+ flat patch
+
+ nii_file: str,
+ The full path to the nifti file with the data.
+
+ mr_session: str
+ Full path to the mrSESSION.mat file, from which the alignment will be
+ pulled
+
+ TR: float
+ The TR used to sample the data
+
+ lb,ub: float
+ The cutoff points for a boxcar filter
+
+ Returns
+ -------
+
+ ts_out, flat_coords : list with [tseries_l,tseries_r]
+
+ """
+ coords_mat = sio.loadmat('%s/coords.mat'%flat_dir,squeeze_me=True)
+ flat_coords = coords_mat['coords']
+ gray_coords = coords_mat['grayCoords']
+
+ # Add ones to the end, to make the shape work for the transformation with
+ # the alignment matrix, below:
+ gray_coords = [np.vstack([gray_coords[i],np.ones(gray_coords[i].shape[-1])])
+ for i in range(2)] # 2 hemispheres
+
+ mrSESSION = sio.loadmat(mr_session,squeeze_me=True,struct_as_record=True)
+
+ # The following extracts the alignment matrix from Inplane to Volume
+ # coordinates:
+ alignment = np.matrix(mrSESSION['mrSESSION']['alignment'][np.newaxis][0].squeeze())
+
+ # The mrSESSION alignment matrix is the one that aligns from Inplane to
+ # Volume. For this, we want the inverse:
+ alignment = alignment.getI()
+
+ # And we only need the top three rows (a 3-by-4 affine):
+ alignment = alignment[:3,:]
+
+ # Do the transformation for both hemispheres, upsample, and then round , so
+ # that we get the Inplane coords:
+ inplane_coords = [np.round(upsample_coords(alignment * gray_coords[i],
+ up_samp)) for i in range(2)]
+
+ # Get the data from the nifti file in question, while boxcar filtering into
+ # the frequency range defined by the input:
+ tseries = load_nii(nii_file,inplane_coords,TR,normalize=normalize,
+ filter=dict(method='boxcar',lb=lb,ub=ub),
+ verbose=True)
+
+ print ('Assigning data to flat coordinates')
+ im_size = tuple(coords_mat['imSize'])
+
+ # Make the TimeSeries to fill with data (one for each hemisphere):
+ ts_out = []
+
+ # Loop over hemispheres:
+ for hemi_idx in range(2):
+ # Add a TimeSeries with the right shape:
+ ts_out.append(ts.TimeSeries(data=np.ones(
+ np.hstack([im_size,tseries[hemi_idx].shape[-1]]))*np.nan,
+ sampling_interval=TR))
+
+ idx = tuple(np.round(flat_coords[hemi_idx]-1).astype(int))
+ my_t = tseries[hemi_idx].time
+ for t in my_t:
+ ts_out[-1].data[...,my_t.index_at(t)][idx]=tseries[hemi_idx].at(t)
+
+ return ts_out,flat_coords
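
For orientation, a minimal usage sketch of the two core helpers in this file. The ROI path, scan path, and TR below are hypothetical placeholders (not values from this commit), and the sketch assumes nibabel, scipy, and nitime are installed, as the module itself requires.

    from vista_utils import getROIcoords, get_time_series_inplane

    # Hypothetical paths and TR, for illustration only
    roi_coords = getROIcoords('/path/to/Inplane/ROIs/LGN.mat')   # 3 x N voxel coordinates
    lgn_ts = get_time_series_inplane(roi_coords,
                                     '/path/to/Inplane/Original/Scan1.nii.gz',
                                     f_c=0.01, up_sample_factor=[1, 1, 1],
                                     detrend=True, normalize=True, average=True,
                                     TR=3.0)
    # With average=True, lgn_ts.data is a single averaged % signal change time course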
150 users/Rachel/LGN_Code/R_analysis/.Rhistory
@@ -0,0 +1,150 @@
+fm1[0]
+fm1[1]
+fm1[2]
+fm1[3]
+fm1[8]
+TukeyHSD(fm1[8],"Subject")
+TukeyHSD(fm1[8][1],"Subject")
+TukeyHSD(fm1[8][1][1],"Subject")
+fm1[8][1][1]
+fm1[8]
+fm1[8][1]
+fm1[8]
+help(fm1[8])
+glht(fm1)
+exit
+q
+quit
+quit()
+data = read.table('~/Projects/AntiQ/mean_RT4R.txt',header = TRUE)
+attach(data)
+lme
+RT
+l.mod <- lme(RT~Cue*Drug*SOA,random=~1 | Subject/(Cue*Drug*SOA))
+l.mod <- lme(RT~Cue+Drug+SOA,random=~1 | Subject/(Cue*Drug*SOA))
+l.mod <- lme(RT~Cue+Drug+SOA,random=~1 | Subject/(Cue+Drug+SOA))
+l.mod <- lme(RT~Cue+Drug+SOA,random=~1 | Subject/(SOA))
+anova(l.mod)
+l.mod <- lme(RT~Cue+Drug+SOA,random=~1 | Subject/(SOA*Drug*SOA))
+l.mod <- lme(RT~Cue+Drug+SOA,random=~1 | Subject/SOA)
+anova(l.mod)
+l.mod <- lme(RT~Cue+Drug+SOA,random=~1 | Subject/Drug)
+anova(l.mod)
+l.mod <- lme(RT~Cue+Drug+SOA,random=~1 | Subject/(Cue*Drug*SOA))
+l.mod <- lme(RT~Cue+Drug+SOA,random=~3 | Subject/(Cue*Drug*SOA))
+l.mod <- lme(RT~Cue+Drug+SOA,random=~2 | Subject/(Cue*Drug*SOA))
+l.mod <- lme(RT~Cue+Drug+SOA,random=~2 | Subject/(Cue*Drug*SOA),data=data)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~2 | Subject/(Cue*Drug*SOA),data=data)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~2 | Subject/(Cue),data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~2 | Subject/(Drug),data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~2 | Subject/(SOA),data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~3 | Subject/(SOA),data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~1 | Subject/(SOA),data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~1 | Subject,data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue+Drug+SOA),random=~1 | Subject,data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~1 | Subject,data=data)
+anova(l.mod)
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~1 | Subject/,data=data)a
+AQ
+plot(AQ)
+library(HH)
+?MMC
+glht.mmc(AQ)
+?TukeyHSD
+l.mod <- lme(RT~(Cue*Drug*SOA),random=~1 | Subject/SOA,data=data)
+TukeyHSD(l.mod)
+l.mod <- aov(RT~(Cue*Drug*SOA))
+l.mod
+TukeyHSD(l.mod)
+l.mod(RT~(Cue*Drug*SOA))
+l <- aov(RT~(Cue*Drug*SOA))
+l
+TukeyHSD(l)
+cd
+data = read.table('~/Projects/bromo_new/RT_rand.txt',header = TRUE)
+data = read.delim('~/Projects/bromo_new/RT_anti.txt')
+data
+data = read.table('~/Projects/textureDiscrim_patch/Analysis/d_prime.txt')
+attach(data)
+data
+AQ =aov(d_prime~(Eccentricity*Drug*Order)+Error(Subject/(Eccentricity*Drug)+Order
+)
+data = read.table('~/Projects/textureDiscrim_patch/Analysis/d_prime.txt',header=TRUE)
+attach(data)
+AQ =aov(d_prime~(Eccentricity*Drug*Order)+Error(Subject/(Eccentricity*Drug)+Order))
+require(lme)
+require(nlme)
+nlme
+nlme?
+q
+help(nlme)
+fm1 <- nlme(d_prime~(SSasymp(Eccentricity,Drug,Order)))
+fm1 <- nlme(d_prime~(SSasymp(Eccentricity,Drug,Order),fixed=Eccentricity,random=Order))
+fm1 <- nlme(d_prime~(SSasymp(Eccentricity,Drug,Order)),fixed=Eccentricity,random=Order))
+fm1 <- nlme(d_prime~(SSasymp(Eccentricity,Drug,Order)),fixed=Eccentricity,random=Order)
+fm1 <- nlme(d_prime~SSasymp(Eccentricity,Drug,Order),fixed=Eccentricity,random=Order)
+fm1 <- nlme(d_prime~SSasymp(Eccentricity,Drug,Order),fixed=Eccentricity,random=Order,data=data)
+fm1 <- nlme(d_prime ~ Eccentricity, random=~1|Subject/Order)
+fm1 <- nlme(d_prime ~ Eccentricity, random=~1|Subject/Order, data=data)
+require(pplot)
+require(plot)
+require(gplot)
+pwd
+ls
+cd ~
+ls
+ls('.')
+cd
+cd?
+d
+help(cd)
+help(pwd)
+??pwd
+ls('.')
+ls
+ls()
+del(AQ)
+rm(AQ)
+ls()
+rm(data)
+getwd
+getwd()
+history()
+q()
+Anova
+require(car)
+Anova?
+q
+Anova
+help(Anova)
+help(lm)
+require(ez)
+help(ez)
+help(ezPrecis)
+source("/Users/arokem/learnR/R Workflow script Geoff Robinson.R")
+install.packages('googleVis')
+Fruits
+require(googleVis)
+Fruits
+M <- gvisMotionChart(Fruits)
+M <- gvisMotionChart(Fruits,idvar="Fruit",timevar="Year")
+plot(M)
+M <- gvisMotionChart(Fruits,idvar="Fruit",timevar="Year")
+plot(M)
+Exit
+quit()
+library(googleVis)
+M <- gvisMotionChart(Fruits, idvar="Fruit", timevar="Year")
+plot(M)
+plot(M)
+?googleVis
+help(googleVis)
+help(googleVis)
+help(googleVis)
BIN  users/Rachel/LGN_Code/R_analysis/Protocol1_tseries_workspace_20101205.RData
BIN  users/Rachel/LGN_Code/R_analysis/Protocol2-mostly-tseries_workspace_20101205.RData
98 users/Rachel/LGN_Code/R_analysis/lgn_exploratory.R
@@ -0,0 +1,98 @@
+# lgn_exploratory.R
+
+# set working directory
+setwd('/Volumes/Plata1/LGN_Localizer/Scans/MAS_20100212_session/Run02_MoreCoverage/MAS_20100212-2_MC/ROIAnalysis/')
+
+# read in data (another time try read.table)
+#roi <- read.csv('dat_files/lgnROI1-2_AvgScan1-4_20101028.dat', header=FALSE)
+roi <- read.csv('dat_files/lgnROI3_AvgScan1-4_20101028.dat', header=FALSE)
+roi <- read.csv('dat_files/lgnROI3_AvgScan1-6_20101029.dat', header=FALSE)
+
+# set up time series
+TR <- 3
+roi <- ts(data=roi, start=TR, frequency=1/TR)
+t <- ts(data=1:dim(roi)[1]*TR, start=TR, frequency=1/TR)
+
+# stim params
+stim_freq = 1/30
+stim_freq_TR = stim_freq*TR
+
+# time series mean
+roi_mean <- apply(X = roi, MARGIN = 1, FUN = mean)
+roi_mean <- ts(data=roi_mean, start=TR, frequency=1/TR)
+
+# plot all voxels and mean tseries
+matplot(roi, type="l", xlab='Time (TRs)', ylab='% signal change')
+matlines(roi_mean, type="l", lwd=3)
+
+# trying out some basic stats
+summary(roi_mean)
+
+qqnorm(roi_mean)
+qqline(roi_mean)
+
+roi_mean_diff <- diff(roi_mean, lag=1, differences=1)
+plot(roi_mean_diff, xlab='Time (TRs)', ylab='Lag 1 difference', type='l')
+
+lag.plot(roi_mean, lags=16)
+
+acf(roi_mean)
+pacf(roi_mean)
+
+arima(roi_mean)
+myspec <- spectrum(roi_mean)
+
+
+# let's try fitting a sine wave regression for the mean signal
+reg1 = cos(2*pi*stim_freq*t)
+reg2 = sin(2*pi*stim_freq*t)
+
+fit <- lm(roi_mean ~ 0 + reg1 + reg2) # 0 since no intercept
+fit_sum <- summary(fit)
+
+ste <- fit_sum$coefficients[,2]
+tval <- fit_sum$coefficients[,3]
+pval <- fit_sum$coefficients[,4]
+
+beta = fit$coefficients
+phase = atan(-beta[2]/beta[1])
+amp = beta[1]/cos(phase)
+vals = amp*cos(2*pi*stim_freq*t + phase)
+
+#fitted_values = ts(data=fit$fitted.values, start=TR, frequency=1/TR)
+#fitted_values2 = beta[1]*cos(2*pi*stim_freq*t) + beta[2]*sin(2*pi*stim_freq*t)
+
+plot(roi_mean)
+lines(vals, type='o')
+
+
+
+
+
+#######################
+# general useful stuff
+#######################
+
+# make a new figure
+quartz()
+
+# clear all
+rm(list = ls())
+
+# save the current figure
+dev.copy2pdf(file='fname.pdf')
+pdf('mygraph.pdf')
+
+# read dat file
+roi3mts <- read.csv('dat_files/roi3MTS_vox3-14-19-24.dat', header = FALSE)
+
+# write mat file
+writeMat('mat_files/sigVoxROIIdx.mat', roi1_idx=roi1_idx, roi2_idx=roi2_idx)
+
+# install and load matlab packages
+install.packages("R.matlab")
+hbLite(c("R.oo", "R.utils"), CRAN=TRUE)
+library("R.matlab")
+
+library(gplots)
+plotCI
80 users/Rachel/LGN_Code/R_analysis/lgn_sineRegression.R
@@ -0,0 +1,80 @@
+# sine wave regression
+#
+# assume roi data and stim params are already in the workspace
+
+reg1 = cos(2*pi*stim_freq*t)
+reg2 = sin(2*pi*stim_freq*t)
+
+nvox = dim(roi)[2]
+
+fit_ste <- matrix(data=NA, nrow=2, ncol=nvox, byrow = FALSE)
+fit_tval <- matrix(data=NA, nrow=2, ncol=nvox, byrow = FALSE)
+fit_pval <- matrix(data=NA, nrow=2, ncol=nvox, byrow = FALSE)
+
+fit_beta <- matrix(data=NA, nrow=2, ncol=nvox, byrow = FALSE)
+fit_phase <- matrix(data=NA, nrow=1, ncol=nvox, byrow = FALSE)
+fit_amp <- matrix(data=NA, nrow=1, ncol=nvox, byrow = FALSE)
+fit_vals <- matrix(data=NA, nrow=length(t), ncol=nvox, byrow = FALSE)
+
+for(vox in 1:nvox){
+
+fit <- lm(roi[,vox] ~ 0 + reg1 + reg2) # 0 since no intercept
+fit_sum <- summary(fit)
+
+ste <- fit_sum$coefficients[,2]
+tval <- fit_sum$coefficients[,3]
+pval <- fit_sum$coefficients[,4]
+
+beta = fit$coefficients
+phase = atan(-beta[2]/beta[1])
+amp = beta[1]/cos(phase)
+vals = amp*cos(2*pi*stim_freq*t + phase)
+
+fit_ste[,vox] <- ste
+fit_tval[,vox] <- tval
+fit_pval[,vox] <- pval
+
+fit_beta[,vox] <- beta
+fit_phase[vox] <- phase
+fit_amp[vox] <- amp
+fit_vals[,vox] <- vals
+
+}
+
+fit_vals <- ts(data=fit_vals, start=TR, frequency=1/TR)
+resids = roi - fit_vals
+
+# can change all amps to positive so phase-amp combos are unique
+# change phases to match, and then make sure all remaining phases are positive
+fit_amp_pos <- fit_amp
+fit_amp_pos[fit_amp<0] <- fit_amp_pos[fit_amp<0]*(-1)
+fit_phase_pos = fit_phase
+fit_phase_pos[fit_amp<0] <- fit_phase_pos[fit_amp<0] + pi
+fit_phase_pos[fit_phase_pos<0] <- fit_phase_pos[fit_phase_pos<0] + 2*pi
+
+
+# plot
+sample_vox = 1
+plot(roi[,sample_vox])
+lines(fit_vals[,sample_vox], col='red')
+
+# check resids
+sqe <- colMeans(resids[,sig_amp_idx]^2)
+mean(sqe)
+
+par(mfrow=c(1,2))
+sample_vox = 2
+sample_vox = sig_amp_idx[4]
+qqnorm(resids[,sample_vox])
+qqline(resids[,sample_vox])
+dev.copy2pdf(file='figures/R_figs/roi3_residQQNorm_vox2_vox59.pdf')
+
+plot(resids[,sample_vox], ylab='% signal change', main='Residuals')
+spectrum(resids[,sample_vox], main='Residuals periodogram')
+dev.copy2pdf(file='figures/R_figs/roi3_residTSeriesPeriodogram_vox2.pdf')
+
+plot(1:nvox,fit_pval[1,])
+points(1:nvox, fit_pval[2,], col='blue')
+abline(.05,0)
+
+
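
For reference, lm(roi[,vox] ~ 0 + reg1 + reg2) fits beta1*cos(2*pi*f*t) + beta2*sin(2*pi*f*t), which can be rewritten as amp*cos(2*pi*f*t + phase) with phase = atan(-beta2/beta1) and amp = beta1/cos(phase), exactly as computed above. A minimal numpy sketch of the same per-voxel fit, on synthetic data (illustrative only, not part of this commit):

    import numpy as np

    TR, stim_freq = 3.0, 1.0 / 30
    t = np.arange(1, 161) * TR                     # e.g. 160 TRs
    roi = np.random.randn(len(t), 5)               # 5 synthetic voxel time series

    X = np.column_stack([np.cos(2 * np.pi * stim_freq * t),
                         np.sin(2 * np.pi * stim_freq * t)])
    beta = np.linalg.lstsq(X, roi, rcond=None)[0]  # no intercept, as in lm(y ~ 0 + ...)

    phase = np.arctan(-beta[1] / beta[0])
    amp = beta[0] / np.cos(phase)
    fit_vals = amp * np.cos(2 * np.pi * stim_freq * t[:, None] + phase)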
75 users/Rachel/LGN_Code/R_analysis/lgn_sineRegressionAnalysis.R
@@ -0,0 +1,75 @@
+# sine wave regression analysis
+#
+# finds voxels with significant parameter fits, divides into ROIs based on phase, and plots data from these voxels
+# assume roi data and stim params are already in the workspace, and lgn_sineRegression.R has been run
+
+# which p-vals are < 0.05?
+sig_amp_idx <- which(fit_pval[1,]<0.05)
+sig_phase_idx <- which(fit_pval[2,]<0.05)
+
+# which voxels have significant amp AND phase
+sig_vox <- matrix(0, nvox, 2)
+sig_vox[sig_amp_idx,1] <- 1
+sig_vox[sig_phase_idx,2] <- 1
+sig_ap_idx <- which(rowSums(sig_vox)==2)
+
+# look at the phases of these voxels
+par(mfrow=c(3,1))
+hist(fit_phase[sig_amp_idx], breaks=30)
+hist(fit_phase[sig_phase_idx], breaks=30)
+hist(fit_phase[sig_ap_idx], breaks=10)
+
+hist(fit_phase[sig_vox[,1] & fit_amp<0], breaks = 16, xlim=c(-1,1))
+hist(fit_phase[sig_vox[,1] & fit_amp>0], breaks = 16, xlim=c(-1,1))
+
+
+# let's say for now that phases<0 are roi1 and phases>0 are roi2
+sig_amp_roi1 <- roi[,sig_vox[,1] & fit_amp<0]
+sig_amp_roi2 <- roi[,sig_vox[,1] & fit_amp>0]
+sig_phase_roi1 <- roi[,sig_vox[,2] & fit_phase<0]
+sig_phase_roi2 <- roi[,sig_vox[,2] & fit_phase>0]
+sig_ap_roi1 <- roi[,sig_vox[,1] & sig_vox[,2] & fit_phase<0]
+sig_ap_roi2 <- roi[,sig_vox[,1] & sig_vox[,2] & fit_phase>0]
+
+# let's look at just the voxels with a sig amp and/or sig phase
+sig_amp_roi1_mean <- ts(data=rowMeans(sig_amp_roi1), start=TR, frequency=1/TR)
+sig_amp_roi2_mean <- ts(data=rowMeans(sig_amp_roi2), start=TR, frequency=1/TR)
+sig_phase_roi1_mean <- ts(data=rowMeans(sig_phase_roi1), start=TR, frequency=1/TR)
+sig_phase_roi2_mean <- ts(data=rowMeans(sig_phase_roi2), start=TR, frequency=1/TR)
+sig_ap_roi1_mean <- ts(data=rowMeans(sig_ap_roi1), start=TR, frequency=1/TR)
+sig_ap_roi2_mean <- ts(data=rowMeans(sig_ap_roi2), start=TR, frequency=1/TR)
+
+# look at sine wave fits for these means
+sig_amp_roi1_fit <- lm(sig_amp_roi1_mean ~ 0 + reg1 + reg2)
+sig_amp_roi2_fit <- lm(sig_amp_roi2_mean ~ 0 + reg1 + reg2)
+sig_phase_roi1_fit <- lm(sig_phase_roi1_mean ~ 0 + reg1 + reg2)
+sig_phase_roi2_fit <- lm(sig_phase_roi2_mean ~ 0 + reg1 + reg2)
+sig_ap_roi1_fit <- lm(sig_ap_roi1_mean ~ 0 + reg1 + reg2)
+sig_ap_roi2_fit <- lm(sig_ap_roi2_mean ~ 0 + reg1 + reg2)
+
+
+# plot sig roi means and fits
+# roi1
+par(mfrow=c(3,1))
+plot(sig_amp_roi1_mean)
+lines(ts(data=sig_amp_roi1_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+plot(sig_phase_roi1_mean)
+lines(ts(data=sig_phase_roi1_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+plot(sig_ap_roi1_mean)
+lines(ts(data=sig_ap_roi1_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+
+# roi2
+par(mfrow=c(3,1))
+plot(sig_amp_roi2_mean)
+lines(ts(data=sig_amp_roi2_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+plot(sig_phase_roi2_mean)
+lines(ts(data=sig_phase_roi2_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+plot(sig_ap_roi2_mean)
+lines(ts(data=sig_ap_roi2_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+
+plot(sig_amp_roi1_mean)
+lines(sig_amp_roi2_mean)
+
+
+
+
105 users/Rachel/LGN_Code/R_analysis/lgn_sineRegressionAnalysis_v2.R
@@ -0,0 +1,105 @@
+# sine wave regression analysis
+#
+# finds voxels with significant parameter fits, divides into ROIs based on phase, and plots data from these voxels
+# assume roi data and stim params are already in the workspace, and lgn_sineRegression.R has been run
+
+# plot acf for select voxel
+par(mfrow=c(1,2))
+acf(roi3mts[,3], main='Raw')
+acf(roi3mtsp[,3], main='Detrended')
+dev.copy2pdf(file='figures/detrend_figs/roi3_vox0019_detrendACF2.pdf')
+
+# plot amps and phases with standard errors
+par(mfrow=c(1,1))
+plotCI(fit_phase_pos, fit_amp_pos, fit_ste[1,], xlab='phase', ylab='amplitude', main='Parameter estimates')
+plotCI(fit_phase_pos, fit_amp_pos, fit_ste[1,], err='x', add=TRUE)
+dev.copy2pdf(file='figures/R_figs/roi3_phaseAmp_paramEstimates.pdf')
+
+# which p-vals are < 0.05? (only amp matters, we don't care if phase is different from zero)
+sig_amp_idx <- which(fit_pval[1,]<0.05)
+
+# which voxels have significant amp
+sig_vox <- matrix(0, nvox, 1)
+sig_vox[sig_amp_idx,1] <- 1
+
+# look at the phases of these voxels
+par(mfrow=c(2,1))
+hist(fit_phase[sig_vox[,1] & fit_amp<0], breaks = 16, xlim=c(-1,1))
+hist(fit_phase[sig_vox[,1] & fit_amp>0], breaks = 16, xlim=c(-1,1))
+
+par(mfrow=c(1,1))
+hist(fit_phase_pos[sig_vox[,1]==1], breaks = 30, xlim=c(0,2*pi), main='Voxels with amplitude fit p<0.05', xlab='Phase')
+dev.copy2pdf(file='figures/R_figs/roi1-2p_sig_vox_phases.pdf')
+
+# let's say for now that amp<0 are roi1 and amp>0 are roi2, sig amp voxels only
+roi1 <- roi[,sig_vox[,1] & fit_amp<0]
+roi2 <- roi[,sig_vox[,1] & fit_amp>0]
+
+# roi means
+roi1_mean <- ts(data=rowMeans(roi1), start=TR, frequency=1/TR)
+roi2_mean <- ts(data=rowMeans(roi2), start=TR, frequency=1/TR)
+
+# look at sine wave fits for these means
+roi1_fit <- lm(roi1_mean ~ 0 + reg1 + reg2)
+roi2_fit <- lm(roi2_mean ~ 0 + reg1 + reg2)
+
+# plot sig amp roi means and fits
+par(mfrow=c(2,1))
+plot(roi1_mean)
+lines(ts(data=roi1_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+plot(roi2_mean)
+lines(ts(data=roi2_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+
+plot(roi1_mean)
+lines(roi2_mean)
+
+
+# now let's divide into rois based on positive-restricted phase, sig amp voxels only
+roi1p <- roi[,sig_vox[,1] & fit_phase_pos>2 & fit_phase_pos<5]
+roi2p <- roi[,sig_vox[,1] & (fit_phase_pos<2 | fit_phase_pos>5)]
+
+# roi means
+roi1p_mean <- ts(data=rowMeans(roi1p), start=TR, frequency=1/TR)
+roi2p_mean <- ts(data=rowMeans(roi2p), start=TR, frequency=1/TR)
+
+# look at sine wave fits for these means
+roi1p_fit <- lm(roi1p_mean ~ 0 + reg1 + reg2)
+roi2p_fit <- lm(roi2p_mean ~ 0 + reg1 + reg2)
+
+# plot sig amp roi means and fits
+par(mfrow=c(2,1))
+plot(roi1p_mean, main='Mean time series and model fit - population 1', ylab='% signal change')
+lines(ts(data=roi1p_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+plot(roi2p_mean, main='Mean time series and model fit - population 2', ylab='% signal change')
+lines(ts(data=roi2p_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+dev.copy2pdf(file='figures/R_figs/roi1-2p_tseries_fits.pdf')
+
+par(mfrow=c(1,1))
+plot(roi1p_mean, main='Mean time series and model fits', ylab='% signal change')
+lines(roi2p_mean)
+lines(ts(data=roi1p_fit$fitted.values, start=TR, frequency=1/TR), col='red')
+lines(ts(data=roi2p_fit$fitted.values, start=TR, frequency=1/TR), col='blue')
+legend('topright', c('Population 1','Population 2'),col=c('red','blue'),pch=c('-','-'))
+dev.copy2pdf(file='figures/R_figs/roi1-2p_tseries_fits_overlay.pdf')
+
+plot(roi1p_mean)
+lines(roi2p_mean)
+
+# ** it turns out that roi and roip are the same! **
+# which voxels are these?
+roi1_idx = which(sig_vox[,1] & fit_phase_pos>2 & fit_phase_pos<5)
+roi2_idx = which(sig_vox[,1] & (fit_phase_pos<2 | fit_phase_pos>5))
+
+# store their pvals and tvals and phases
+roi1_pval = fit_pval[1,roi1_idx]
+roi2_pval = fit_pval[1,roi2_idx]
+
+roi1_tval = abs(fit_tval[1,roi1_idx])
+roi2_tval = abs(fit_tval[1,roi2_idx])
+
+roi1_phase = fit_phase_pos[roi1_idx]
+roi2_phase = fit_phase_pos[roi2_idx]
+
+# save roi data to mat file
+writeMat('mat_files/sigVoxROIData.mat', roi1_idx=roi1_idx, roi2_idx=roi2_idx, roi1_pval=roi1_pval, roi2_pval=roi2_pval, roi1_tval=roi1_tval, roi2_tval=roi2_tval, roi1_phase=roi1_phase, roi2_phase=roi2_phase)
+
36 users/Rachel/LGN_Code/R_analysis/rd_tSideFSideComparison.m
@@ -0,0 +1,36 @@
+% rd_tSideFSideComparison.m
+%
+% Compare time-side analysis done in R to frequency-side analysis done in
+% Matlab.
+
+%% load ROI data with freq-side analysis
+load lgnROI1-2Data_AvgScan1-4_20101028
+csvwrite('dat_files/lgnROI1-2_AvgScan1-4_20101028.dat',roi) % export for R
+
+%% load time-side analysis info from R, indices of voxels with significant
+% amp fits, divided into two ROIs based on phase
+load('mat_files/sigVoxROIIdx.mat')
+
+%% initializations
+nvox = size(lgnROI3,2);
+
+%% show z-score and roi mapping for each voxel
+figure
+scatter(1:nvox, data3.zScore)
+hold on
+scatter(roi1_idx, data3.zScore(roi1_idx),'.r')
+scatter(roi2_idx, data3.zScore(roi2_idx),'.y')
+legend('all voxels','ROI1','ROI2')
+xlabel('Voxel number')
+ylabel('z-score')
+
+%% coordinates of significant voxels
+roi1_coords = lgnROI3Coords(:,roi1_idx);
+roi2_coords = lgnROI3Coords(:,roi2_idx);
+zthresh_coords = lgnROI3Coords(:,zThresh(1).voxNums);
+
+figure
+hold on
+scatter3(roi1_coords(1,:), roi1_coords(2,:), roi1_coords(3,:))
+scatter3(roi2_coords(1,:), roi2_coords(2,:), roi2_coords(3,:),'r')
+scatter3(zthresh_coords(1,:), zthresh_coords(2,:), zthresh_coords(3,:),'.g')
32 users/Rachel/LGN_Code/ang2pix.m
@@ -0,0 +1,32 @@
+function p = ang2pix(ang, screen_dim, screen_res, view_dist, angle_type)
+
+% function p = ang2pix(ang, screen_dim, screen_res, view_dist, angle_type)
+%
+% ang = visual angle in degrees
+% p = pixel distance in pixels
+% angle_type = 'central' or 'radial'. defaults to central.
+%
+% view_dist = viewing distance in preferred units (eg. cm)
+% screen_dim = a screen dimension (eg. width of screen) in preferred units (eg. cm)
+% screen_res = resolution of that screen dimension (eg. width) in pixels
+%
+% Note that 1 pi radians = 180 degrees. Also note that screen_dim/screen_res
+% is just a ratio to convert between pixels and the units you used to measure
+% viewing distance.
+%
+% By Rachel Denison
+
+if ~exist('angle_type','var')
+ angle_type = 'central';
+end
+
+switch angle_type
+ case{'central',[]}
+
+ p = 2 * view_dist * tan((ang/2)*(pi/180)) * (screen_res/screen_dim);
+
+ case{'radial'}
+
+ p = view_dist * tan(ang*(pi/180)) * (screen_res/screen_dim);
+end
+
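
A quick numeric check of the 'central' formula above, with hypothetical viewing parameters (not from this commit):

    import math

    # 1 degree of central visual angle, 57 cm viewing distance,
    # 40 cm wide screen shown at 1024 px horizontal resolution:
    view_dist, screen_dim, screen_res, ang = 57.0, 40.0, 1024, 1.0
    p = 2 * view_dist * math.tan((ang / 2) * (math.pi / 180)) * (screen_res / screen_dim)
    # p comes out to roughly 25.5 pixels for 1 degree with this setup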
BIN  users/Rachel/LGN_Code/bicolorMap.mat
52 users/Rachel/LGN_Code/cg_qualityControl.m
@@ -0,0 +1,52 @@
+function cg_qualityControl(sub)
+%quality control script
+
+%directory structure
+dataDir = '/home/despo/cgratton/data/FaceSpace_loc/';
+subDir = [dataDir 'Data/' sub '/'];
+tsDir = '/home/despo/cgratton/gitrepos/podNtools/';
+addpath(tsDir);
+scriptDir = '/home/despo/cgratton/gitrepos/studies/Study_FaceMapping/';
+
+%types
+types = {'faceattend_highRES', 'facescene_loc_highSNR','facespace_loc_highRES'};
+%types = {'faceattend_highSNR', 'facescene_loc_highSNR','facespace_loc_highSNR'};
+
+for i=1:length(types)
+
+ disp(types{i});
+
+ cd([subDir types{i} '/Analysis/'])
+ if strcmp(types{i},'facescene_loc_highSNR')
+
+ %tsdiffana([sub '-EPI-001.nii'; sub '-EPI-002.nii'],[],100)
+ tsdiffana([sub '-EPI-001.nii'],[],100)
+
+ else
+
+ tsdiffana([sub '-EPI-001.nii';
+ sub '-EPI-002.nii';
+ sub '-EPI-003.nii';
+ sub '-EPI-004.nii';
+ sub '-EPI-005.nii';
+ sub '-EPI-006.nii'],[],100)
+ %sub '-EPI-007.nii'],[],100)
+
+ end
+
+ %save the output figure
+ saveas(100,'tsdiffana_output.pdf')
+ close(100)
+
+ %make a directory to save tsdiffana output to
+ if ~exist('tsdiffana','dir')
+ mkdir('tsdiffana')
+ end
+ outDir = [subDir types{i} '/Analysis/tsdiffana/'];
+ movefile('timediff.mat',outDir)
+ movefile('v*nii',outDir)
+ movefile('tsdiffana_output.pdf',outDir)
+
+end
+
+cd(scriptDir)
279 users/Rachel/LGN_Code/coherence_analysis/coherence_fmri_only_rd.m
@@ -0,0 +1,279 @@
+%%%% Coherence with fMRI data
+
+%% We are now going to calculate the coherence and channel capacity using
+% Hsu, Borst, Theunissen methodology for some fMRI voxel data.
+
+% First we will load data from three voxels. The voxels are from visual
+% cortex, recording during stimulation with natural movies. The 486 sec
+% movie was repeated 10 times. One voxel has good signal to noise, another
+% average, and the last bad signal to noise.
+load ../data/vox_data.mat
+
+%% As with the PSTHs, average the even trials, the odd trials, and all
+% trials, for each voxel.
+vox_av_even = mean(vox_av(:,2:2:10),2);
+vox_av_odd = mean(vox_av(:,1:2:10),2);
+vox_av_mean = mean(vox_av,2);
+
+vox_good_even = mean(vox_good(:,2:2:10),2);
+vox_good_odd = mean(vox_good(:,1:2:10),2);
+vox_good_mean = mean(vox_good,2);
+
+vox_bad_even = mean(vox_bad(:,2:2:10),2);
+vox_bad_odd = mean(vox_bad(:,1:2:10),2);
+vox_bad_mean = mean(vox_bad,2);
+
+%% choose a voxel to work with
+vox = vox_good';
+vox_mean = vox_good_mean';
+vox_even = vox_good_even';
+vox_odd = vox_good_odd';
+vox_pred = vox_good_pred';
+
+%% Assignment. First plot the ave response and one or two trials - to give you a sense of the data.
+% Then calculate the Noise, its spectral density (is it white) and its
+% amplitude distribution (is it normal).
+t = 1:size(vox,2);
+ntrials = size(vox,1);
+trials = 1:ntrials;
+
+figure
+hold on
+plot(t, vox)
+plot(t, vox_mean, 'LineWidth', 2)
+
+%% Calculate the noise
+signal = vox_mean;
+noise = vox - repmat(signal, ntrials, 1);
+
+noise_tot = [];
+noise_d1_tot = [];
+signal_tot = [];
+for itrial=1:ntrials
+
+ % Calculate the two estimates of the noise
+ noise_d1(itrial, :) = vox(itrial, :) - mean(vox(find(trials ~= itrial),:), 1);
+
+ noise_tot = [noise_tot noise(itrial, :)];
+ noise_d1_tot = [noise_d1_tot noise_d1(itrial, :)];
+
+ % Plot the noises
+ if (itrial == 1)
+ figure
+ hold on
+ plot(noise_d1(itrial, :), 'r--');
+ plot(noise(itrial,:), 'r');
+
+ % Plot the signal
+ plot(signal, 'b');
+ hold off;
+ end
+end
+
+%% Is the noise white?
+TR = 1;
+fs = 1/TR;
+window = [];
+nfft = 486;
+ % There are many different algorithms for estimating a power spectral
+ % density. The simplest is the periodogram that divides the time series
+ % into non-overlapping chunkds of size nfft (in points) and multiplies
+ % that segment with the weights given by the window. If window is null,
+ % periodogram uses a rectangular window.
+%[Pnoise,f] = periodogram(noise_tot,window,nfft,fs);
+%[Pnoise_d1,f] = periodogram(noise_d1_tot,window,nfft,fs);
+%[Psignal,f] = periodogram(signal_tot,window,nfft,fs);
+
+% Another methods is to use overlapping chunks - this is called Welch's
+% method. Here window can be the number of points (usually equal to nfft)
+% of a hamming window or a vector of weights. noverlap is the number of
+% points in the overlap. If noverlap is [], it is set to nfft/2.
+% [Pxx,f] = pwelch(x,window,noverlap,nfft,fs). You will see that using the
+% hamming window gives a smoother estimate.
+
+
+window = 486;
+% window = ones(1,nfft); % square window
+noverlap = [];
+% noverlap = 0;
+nfft = 486;
+nw = 3;
+
+%[Pnoise, f] = periodogram(noise_tot,[], window, fs);
+
+[Pnoise, f] = pwelch(noise_tot, window, noverlap, nfft, fs);
+[Pnoise_d1, f] = pwelch(noise_d1_tot, window, noverlap, nfft, fs);
+[Psignal, f] = pwelch(signal, window, noverlap, nfft, fs);
+
+%[Pnoise,f] = pmtm(noise_tot, nw, nfft,fs);
+%[Pnoise_d1,f] = pmtm(noise_d1_tot, nw, nfft,fs);
+%[Psignal,f] = pmtm(signal_tot, nw, nfft,fs);
+
+figure;
+plot(f, 10*log10(Pnoise), 'r');
+hold on;
+plot(f, 10*log10(Pnoise_d1), 'r--');
+plot(f, 10*log10(Psignal), 'k');
+legend('Noise', 'Noise D1', 'Signal');
+xlabel('Frequency (Hz)');
+ylabel('Power (dB)');
+hold off;
+
+% We can also display the signal to noise ratio.
+figure;
+plot(f, log10(Psignal./Pnoise), 'k');
+hold on;
+plot(f, log10(Psignal./Pnoise_d1), 'k--');
+ylabel('SNR');
+xlabel('Frequency (Hz)');
+
+%% Is the noise Gaussian?
+figure;
+histfit(10*log10(Pnoise))
+xlabel('Power (dB)')
+title('Noise')
+
+figure;
+histfit(10*log10(Pnoise_d1))
+xlabel('Power (dB)')
+title('Noise D1')
+
+
+%% we're going to make a copy of the average response and corrupt
+% it with Gaussian noise, pretending it's a response that comes from
+% some model
+noiseGain = 1; %play with gain to increase or decrease PSTH corruption
+gaussNoise = randn(size(vox_mean)) * noiseGain; %make some noise!
+vox_meanNoisy = vox_mean + gaussNoise; %corrupt response
+
+%% finally, we're going to compute the upper bound of coherence, as
+% for the voxel itself, as well as the coherence between the noise-
+% corrupted response and actual response
+
+infoFreqCutoff = -1; %max frequency in Hz to compute coherence for
+infoWindowSize = 50; %window size in seconds to compute coherence FFT. The default in compute_coherence_full is 500 ms.
+numStimPresentations = 10;
+fmri_sampling_rate = 1; % This BOLD signal has a 1 Hz sampling rate
+
+[cBound, cModel] = compute_coherence_full(vox_meanNoisy, vox_mean, vox_even,...
+ vox_odd, fmri_sampling_rate, numStimPresentations,...
+ infoFreqCutoff, infoWindowSize);
+
+performanceRatio = cModel.info / cBound.info; %how well did our noisy psth do?
+
+%% now we'll make some plots of the coherence values, solid lines
+% are the upper bounds, dotted lines are noisy PSTHs
+figure; hold on;
+plot(cBound.f, cBound.c, 'k-', 'LineWidth', 2);
+plot(cBound.f, cBound.cUpper, 'b-', 'LineWidth', 2);
+plot(cBound.f, cBound.cLower, 'r-', 'LineWidth', 2);
+
+plot(cModel.f, cModel.c, 'k--', 'LineWidth', 2);
+plot(cModel.f, cModel.cUpper, 'b--', 'LineWidth', 2);
+plot(cModel.f, cModel.cLower, 'r--', 'LineWidth', 2);
+xlabel('Frequency (Hz)');
+ylabel('Coherence');
+theTitle = sprintf('Info=%0.2f bits/s out of %0.2f bits/s | Ratio=%0.2f', ...
+ cModel.info, cBound.info, performanceRatio);
+title(theTitle);
+axis([min(cBound.f), max(cBound.f), 0, 1]);
+
+
+%% Now we're going to compute coherence with a response that does come from a model
+% of visual cortex created by the Gallant lab. The model predictions are the variables
+% that end in "_pred", i.e. vox_pred
+
+infoFreqCutoff = -1; %max frequency in Hz to compute coherence for
+infoWindowSize = 50; %window size in seconds to compute coherence FFT. The default in compute_coherence_full is 500 ms.
+numStimPresentations = 10;
+fmri_sampling_rate = 1; % This BOLD signal has a 1 Hz sampling rate
+
+[cBound, cModel] = compute_coherence_full(vox_pred, vox_mean, vox_even,...
+ vox_odd, fmri_sampling_rate, numStimPresentations,...
+ infoFreqCutoff, infoWindowSize);
+
+performanceRatio = cModel.info / cBound.info; %how well did our noisy psth do?
+
+%% now we'll make some plots of the coherence values, solid lines
+% are the upper bounds, dotted lines are noisy PSTHs
+figure; hold on;
+plot(cBound.f, cBound.c, 'k-', 'LineWidth', 2);
+plot(cBound.f, cBound.cUpper, 'b-', 'LineWidth', 2);
+plot(cBound.f, cBound.cLower, 'r-', 'LineWidth', 2);
+
+plot(cModel.f, cModel.c, 'k--', 'LineWidth', 2);
+plot(cModel.f, cModel.cUpper, 'b--', 'LineWidth', 2);
+plot(cModel.f, cModel.cLower, 'r--', 'LineWidth', 2);
+xlabel('Frequency (Hz)');
+ylabel('Coherence');
+theTitle = sprintf('Info=%0.2f bits/s out of %0.2f bits/s | Ratio=%0.2f', ...
+ cModel.info, cBound.info, performanceRatio);
+title(theTitle);
+axis([min(cBound.f), max(cBound.f), 0, 1]);
+
+%% How about coherence between the mean signal and the response from a
+% single trial
+
+infoFreqCutoff = -1; %max frequency in Hz to compute coherence for
+infoWindowSize = 50; %window size in seconds to compute coherence FFT. The default in compute_coherence_full is 500 ms.
+numStimPresentations = 10;
+fmri_sampling_rate = 1; % This BOLD signal has a 1 Hz sampling rate
+sample_trial = 2;
+
+[cBound, cModel] = compute_coherence_full(vox(sample_trial,:), vox_mean, vox_even,...
+ vox_odd, fmri_sampling_rate, numStimPresentations,...
+ infoFreqCutoff, infoWindowSize);
+
+performanceRatio = cModel.info / cBound.info; %how well did our noisy psth do?
+
+%% now we'll make some plots of the coherence values, solid lines
+% are the upper bounds, dotted lines are noisy PSTHs
+figure; hold on;
+plot(cBound.f, cBound.c, 'k-', 'LineWidth', 2);
+plot(cBound.f, cBound.cUpper, 'b-', 'LineWidth', 2);
+plot(cBound.f, cBound.cLower, 'r-', 'LineWidth', 2);
+
+plot(cModel.f, cModel.c, 'k--', 'LineWidth', 2);
+plot(cModel.f, cModel.cUpper, 'b--', 'LineWidth', 2);
+plot(cModel.f, cModel.cLower, 'r--', 'LineWidth', 2);
+xlabel('Frequency (Hz)');
+ylabel('Coherence');
+theTitle = sprintf('Info=%0.2f bits/s out of %0.2f bits/s | Ratio=%0.2f', ...
+ cModel.info, cBound.info, performanceRatio);
+title(theTitle);
+axis([min(cBound.f), max(cBound.f), 0, 1]);
+
+
+%% Now coherence between trial response and mean signal for all trials
+
+infoFreqCutoff = -1; %max frequency in Hz to compute coherence for
+infoWindowSize = 50; %window size in seconds to compute coherence FFT. The default in compute_coherence_full is 500 ms.
+numStimPresentations = 10;
+fmri_sampling_rate = 1; % This BOLD signal has a 1 Hz sampling rate
+
+for itrial=1:ntrials
+
+ [cBound, cModel] = compute_coherence_full(vox(itrial,:), vox_mean, vox_even,...
+ vox_odd, fmri_sampling_rate, numStimPresentations,...
+ infoFreqCutoff, infoWindowSize);
+
+ performanceRatio = cModel.info / cBound.info; %how well did our noisy psth do?
+
+ modelInfo(itrial,1) = cModel.info;
+ boundInfo(itrial,1) = cBound.info;
+ infoRatio(itrial,1) = performanceRatio;
+
+end
+
+figure
+hold on
+bar(1,mean(modelInfo),.5)
+errorbar(mean(modelInfo),std(modelInfo)./sqrt(ntrials))
+plot(mean(boundInfo),'+g','MarkerSize',25)
+xlim([0 2])
+theTitle = sprintf('Average Info=%0.2f bits/s out of %0.2f bits/s | Ratio=%0.2f', ...
+ mean(modelInfo), mean(boundInfo), mean(infoRatio));
+title(theTitle);
+
+
+
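
The power-spectrum discussion above (periodogram vs. Welch's overlapping-segment method) can be reproduced outside MATLAB with scipy's equivalents; a minimal sketch on synthetic 1 Hz BOLD-like signals (illustrative only, not part of this commit):

    import numpy as np
    from scipy import signal

    fs = 1.0                                   # 1 Hz sampling, as in the script
    t = np.arange(4860) / fs
    bold = np.sin(2 * np.pi * 0.01 * t) + 0.5 * np.random.randn(t.size)
    noise = 0.5 * np.random.randn(t.size)

    f, P_signal = signal.welch(bold, fs=fs, nperseg=486)   # Welch PSD, 486-sample segments
    f, P_noise = signal.welch(noise, fs=fs, nperseg=486)
    f, Cxy = signal.coherence(bold, bold + noise, fs=fs, nperseg=486)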
413 users/Rachel/LGN_Code/coherence_analysis/coherence_tutorial_solution_full_fmri.m
@@ -0,0 +1,413 @@
+
+%% preliminary stuff: get the directory we're in
+% and add the validation subdirectory to the path
+cpath = which('coherence_tutorial');
+[rootDir, name, ext, versn] = fileparts(cpath);
+vpath = fullfile(rootDir, 'validation');
+addpath(vpath);
+ppath = fullfile(rootDir, 'preprocessing');
+addpath(ppath);
+dataDir = fullfile(rootDir, '../data'); %contains stim/response pairs
+stimsDir = fullfile(dataDir, 'all_stims'); %contains the .wav files
+
+
+%% The next three sections allow you to load and visualize single unit data from
+% the Theunissen lab. Your goals are: 1. Get familiar with this data structure and
+% 2. Load your own data in a similar structure.
+% For the Theunissen data you can specify a directory for three brain
+% regions and three example neurons in each:
+% 'mld' is the avian auditory midbrain
+% 'ov' is the avian auditory thalamus
+% 'l2a' is the avian auditory cortex
+% each region has a 'good', 'avg', and 'bad' dataset,
+% corresponding to the signal to noise ratio,
+% quantified by information values.
+cellDirName = 'mld_good';
+cellDir = fullfile(dataDir, cellDirName);
+
+
+%% now we're going to get the stimulus and response
+% files from the cell directory using a function that
+% was written to deal with this directory structure.
+% we'll pull stim/response files for conspecific
+% stimuli. You should write your own data-loading function for your
+% data (an illustrative sketch follows below).
+datasets = find_datasets(cellDir, stimsDir, 'conspecific');
+cellStimDir = datasets{1}.dirname;
+stimFiles = datasets{1}.srPairs.stimFiles; %paths to .wav files
+respFiles = datasets{1}.srPairs.respFiles; %paths to spike* files
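+
+% If you are adapting this tutorial, the commented-out sketch below shows one
+% way you might assemble a comparable structure for your own data. It is not
+% part of the original tutorial: myStimDir, myRespDir, and the file names are
+% placeholders, and only the srPairs.stimFiles/respFiles fields used above are
+% assumed.
+% myStimDir = '/path/to/my_stims';
+% myRespDir = '/path/to/my_resps';
+% myDatasets = cell(1);
+% myDatasets{1}.dirname = myRespDir;
+% myDatasets{1}.srPairs.stimFiles = {fullfile(myStimDir, 'stim1.wav')};
+% myDatasets{1}.srPairs.respFiles = {fullfile(myRespDir, 'spike1')};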
+
+
+%% now we're going to preprocess the sound stimuli by taking the
+% short-time Fourier transform, and preprocess the raw spike
+% times into PSTHs for each stim/response pair
+preprocDir = fullfile(cellStimDir, 'preproc'); %cache the preprocessed data here
+[s,mess,messid] = mkdir(preprocDir);
+preprocOptions = struct; %we'll leave this empty and use default options
+
+srData = preprocess_sound(stimFiles, respFiles, 'ft', preprocOptions, preprocDir);
+pairCount = length(srData.datasets); %# of stim/response pairs
+
+
+%% visualize the stimulus/response pairs by setting showSRPairs = 1
+showSRPairs = 0;
+if showSRPairs
+ %go through each pair in the dataset
+ for k = 1:pairCount
+ ds = srData.datasets{k};
+ plot_tf_resp(ds);
+ end
+end
+
+%% Assignment 1: Calculate the noise for each trial and display it
+% Calculate the noise using a signal generated from all trials, as well
+% as a signal that does not include the trial for which you calculate the
+% noise.
+
+% These will be the noise and signal traces for all the stim-response pairs
+noise_tot = [];
+noise_d1_tot = [];
+signal_tot = [];
+
+wind1 = hanning(31)/sum(hanning(31)); % 31 ms smoothing for plotting only
+
+for k=1:pairCount
+ resp = srData.datasets{k}.resp;
+ stim = srData.datasets{k}.stim;
+ ntrial = length(resp.rawSpikeTimes); % Number of trials for this stim-response
+ stimdur = stim.stimLength*1000.0; % Stimulus duration in ms.
+    binsize = 1;     % Bin size in ms for the signal and noise time series
+ ndur = round(stimdur/binsize)+1; % Length of signal and noise for this stim-response pair
+ noise = zeros(ntrial, ndur);
+ noise_d1 = zeros(ntrial, ndur);
+
+% Transform spike arrival times into a time series at sampling rate binsize
+ for itrial=1:ntrial
+
+ stimes = resp.rawSpikeTimes{itrial};
+ indx = ((stimes >= 0) & (stimes <= stimdur));
+
+ stimes = stimes(indx);
+ sindxs = round(stimes/binsize) + 1;
+ % Prevent exceeding array dimensions (should not happen except
+ % because of rounding off).
+ for j = 1:length(sindxs)
+ if sindxs(j) <= 0
+ sindxs(j) = 1;
+ end
+ if sindxs(j) > ndur
+ sindxs(j) = ndur;
+ end
+ end
+        % This is the time series for this trial; it is stored in 'noise'
+        % because the signal will be subtracted from it below
+ noise(itrial, sindxs) = 1;
+ end
+
+ % Now calculate signal, noise and noise_d1 and plot the first trial
+ % only (for clarity)
+
+ figure;
+ signal = mean(noise,1); % The signal is estimated as the average of all trials
+ signal_tot = [signal_tot signal];
+
+ trials = 1:ntrial;
+ for itrial=1:ntrial
+
+ % First plot the response of the first trial
+ if (itrial == 1)
+ plot(conv(noise(itrial,:), wind1), 'k');
+ hold on;
+ end
+
+ % Calculate the two estimates of the noise
+ noise_d1(itrial, :) = noise(itrial, :) - mean(noise(find(trials ~= itrial),:), 1);
+ noise(itrial, :) = noise(itrial,:) - signal;
+ noise_tot = [noise_tot noise(itrial, :)];
+ noise_d1_tot = [noise_d1_tot noise_d1(itrial, :)];
+
+ % Plot the noises
+ if (itrial == 1)
+ plot(conv(noise_d1(itrial, :), wind1), 'r--');
+ plot(conv(noise(itrial,:), wind1), 'r');
+
+ % Plot the signal
+ plot(conv(signal, wind1), 'b');
+ hold off;
+ end
+ end
+end
+
+
+
+%% Assignment 2: Calculate the noise and signal PSD obtained by averaging
+% and by averaging after JNF. Is the noise white?
+fs = 1000.0;
+window = [];
+nfft = 250;
+ % There are many different algorithms for estimating a power spectral
+ % density. The simplest is the periodogram, which divides the time series
+ % into non-overlapping chunks of size nfft (in points) and multiplies
+ % each segment by the weights given by the window. If window is empty,
+ % periodogram uses a rectangular window.
+%[Pnoise,f] = periodogram(noise_tot,window,nfft,fs);
+%[Pnoise_d1,f] = periodogram(noise_d1_tot,window,nfft,fs);
+%[Psignal,f] = periodogram(signal_tot,window,nfft,fs);
+
+% Another method is to use overlapping chunks - this is called Welch's
+% method. Here window can be the number of points (usually equal to nfft)
+% of a Hamming window or a vector of weights. noverlap is the number of
+% points in the overlap. If noverlap is [], it defaults to 50% of the
+% window length. [Pxx,f] = pwelch(x,window,noverlap,nfft,fs). You will see
+% that using the Hamming window gives a smoother estimate.
+
+window = 250;
+noverlap = 125;
+nfft=250;
+nw = 3;
+
+%[Pnoise, f] = periodogram(noise_tot,[], window, fs);
+
+[Pnoise,f] = pwelch(noise_tot, window,noverlap,nfft,fs);
+[Pnoise_d1,f] = pwelch(noise_d1_tot, window,noverlap,nfft,fs);
+[Psignal,f] = pwelch(signal_tot, window, noverlap, nfft,fs);
+
+%[Pnoise,f] = pmtm(noise_tot, nw, nfft,fs);
+%[Pnoise_d1,f] = pmtm(noise_d1_tot, nw, nfft,fs);
+%[Psignal,f] = pmtm(signal_tot, nw, nfft,fs);
+
+figure;
+plot(f, 10*log10(Pnoise), 'r');
+hold on;
+plot(f, 10*log10(Pnoise_d1), 'r--');
+plot(f, 10*log10(Psignal), 'k');
+legend('Noise', 'Noise D1', 'Signal');
+xlabel('Frequency (Hz)');
+ylabel('Power (dB)');
+hold off;
+
+% Note that the noise is not white but increases with frequency.
+
+% We can also display the signal to noise ratio.
+figure;
+plot(f, log10(Psignal./Pnoise), 'k');
+hold on;
+plot(f, log10(Psignal./Pnoise_d1), 'k--');
+ylabel('SNR');
+xlabel('Frequency (Hz)');
+axis([0 500 -3 3]);
+
+%% Assignment 3: Is the noise gaussian?
+% Matlab has a nice command called histfit that generates a histogram and
+% displays a probability density fit. The default is a normal
+% distribution.
+
+figure;
+% histfit(noise_d1_tot);
+% At 1 ms resolution, spiking noise is not very Gaussian! How about after
+% smoothing?
+histfit(conv(noise_d1_tot, wind1));
+% One can see that neural noise has very high kurtosis.
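+
+% To put a number on the kurtosis claim above, one could compute it directly.
+% kurtosis() is a Statistics Toolbox function (an assumption about your
+% installation); a Gaussian has kurtosis 3, so much larger values mean heavy tails.
+% kurtosis(noise_d1_tot)                % raw 1 ms noise
+% kurtosis(conv(noise_d1_tot, wind1))   % smoothed noise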
+
+
+%% Assignment 4: Calculate and display the coherence derived from the
+% signal-to-noise ratio.
+
+% The coherence is given by
+coh_snr = Psignal./(Pnoise + Psignal);
+df = f(2) - f(1);
+info_snr = sum(-log2(1-coh_snr))*df;
+coh_snr_d1 = Psignal./(Pnoise_d1 + Psignal);
+info_snr_d1 = sum(-log2(1-coh_snr_d1))*df;
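+
+% For reference, the algebra behind the code above: with SNR = Psignal./Pnoise,
+% the coherence is
+%   coh = SNR./(SNR + 1) = Psignal./(Psignal + Pnoise),
+% and the information rate is the capacity-style integral
+%   info = integral( -log2(1 - coh) ) df,
+% approximated here by a sum over frequency bins multiplied by the bin width df.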
+
+figure;
+plot(f, coh_snr, 'k');
+hold on;
+plot(f, coh_snr_d1, 'k--');
+ylabel('Coherence');
+xlabel('Frequency (Hz)');
+title(sprintf('Total information: UB %.0f LB %.0f bits/s', info_snr, info_snr_d1));
+
+%% We are now going to calculate the coherence and channel capacity using
+% the Hsu, Borst & Theunissen methodology. In order to do that we need
+% to take the raw spike times, split them into even and odd trials,
+% and compute PSTHs for each half. there's already a function to
+% do this, so we'll just call it.
+[oddPsths, evenPsths] = compute_psth_halves(srData);
+
+%% the next step is to concatenate the PSTHs across all stim/response
+% pairs into one big vector. we're going to do the same thing to
+% the psth halves as well. we're also going to take note of the
+% # of spike trials
+psthConcat = [];
+psthHalf1Concat = [];
+psthHalf2Concat = [];
+numStimPresentations = -1;
+for k = 1:pairCount
+
+ ds = srData.datasets{k};
+ numStimPresentations = length(ds.resp.rawSpikeTimes);
+ psth = ds.resp.psth;
+ psthConcat = [psthConcat psth];
+
+ psthHalf1Concat = [psthHalf1Concat oddPsths{k}];
+ psthHalf2Concat = [psthHalf2Concat evenPsths{k}];
+
+end
+
+%% we're going to make a copy of the concatenated PSTH and corrupt
+% it with Gaussian noise, pretending it's a PSTH that comes from
+% some model
+noiseGain = 1e-1; %play with gain to increase or decrease PSTH corruption
+noise = randn(size(psthConcat)) * noiseGain; %make some noise!
+psthConcatNoisy = psthConcat + noise; %corrupt PSTH
+psthConcatNoisy(psthConcatNoisy < 0) = 0; %rectify negative values to zero
+psthConcatNoisy(psthConcatNoisy > 1) = 1; %clip values above one
+
+%% finally, we're going to compute the upper bound on coherence
+% for the cell itself, as well as the coherence between the noise-
+% corrupted PSTH and the actual PSTH
+
+infoFreqCutoff = -1; %max frequency in Hz to compute coherence for
+infoWindowSize = 0.250; %window size in seconds to compute coherence FFT
+[cBound, cModel] = compute_coherence_full(psthConcatNoisy, psthConcat, psthHalf1Concat,...
+ psthHalf2Concat, srData.respSampleRate, numStimPresentations,...
+ infoFreqCutoff, infoWindowSize);
+
+performanceRatio = cModel.info / cBound.info; %how well did our noisy psth do?
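+
+% Both compute_coherence_full outputs are structs; the fields used below are
+% .f (frequency axis), .c (coherence), .cUpper/.cLower (upper and lower
+% bounds), and .info (information in bits/s). This is inferred from how
+% cBound and cModel are used in this tutorial, not from separate documentation.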
+
+%% now we'll make some plots of the coherence values, solid lines
+% are the upper bound, dashed lines are the noisy PSTH
+figure; hold on;
+plot(cBound.f, cBound.c, 'k-', 'LineWidth', 2);
+plot(cBound.f, cBound.cUpper, 'b-', 'LineWidth', 2);
+plot(cBound.f, cBound.cLower, 'r-', 'LineWidth', 2);
+
+plot(cModel.f, cModel.c, 'k--', 'LineWidth', 2);
+plot(cModel.f, cModel.cUpper, 'b--', 'LineWidth', 2);
+plot(cModel.f, cModel.cLower, 'r--', 'LineWidth', 2);
+xlabel('Frequency (Hz)');
+ylabel('Coherence');
+theTitle = sprintf('Info=%0.0f bits/s out of %0.0f bits/s | Ratio=%0.2f', ...
+ cModel.info, cBound.info, performanceRatio);
+title(theTitle);
+axis([min(cBound.f), max(cBound.f), 0, 1]);
+
+
+
+
+%%%% Coherence with fMRI data
+
+%% We are now going to calculate the coherence and channel capacity using
+% the Hsu, Borst & Theunissen methodology for some fMRI voxel data.
+
+% First we will load data from three voxels. The voxels are from visual
+% cortex, recorded during stimulation with natural movies. The 486 sec
+% movie was repeated 10 times. One voxel has good signal-to-noise, another
+% average, and the last bad signal-to-noise.
+load ../data/vox_data.mat
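+
+% A quick, optional sanity check on the loaded variables. The layout assumed
+% here (time x trial, roughly 486 samples by 10 repeats at 1 Hz) is inferred
+% from how the variables are indexed below, not stated in the data file.
+% size(vox_good)
+% size(vox_av)
+% size(vox_bad)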
+
+%% As with the PSTHs, average the even trials and the odd trials, and
+% average all the trials, for each voxel.
+vox_av_even = mean(vox_av(:,2:2:10),2);
+vox_av_odd = mean(vox_av(:,1:2:10),2);
+vox_av_mean = mean(vox_av,2);
+
+vox_good_even = mean(vox_good(:,2:2:10),2);
+vox_good_odd = mean(vox_good(:,1:2:10),2);
+vox_good_mean = mean(vox_good,2);
+
+vox_bad_even = mean(vox_bad(:,2:2:10),2);
+vox_bad_odd = mean(vox_bad(:,1:2:10),2);
+vox_bad_mean = mean(vox_bad,2);
+
+%% Assignment. First plot the average response and one or two trials, to give you a sense of the data.
+% Then calculate the noise, its spectral density (is it white?) and its
+% amplitude distribution (is it normal?). A sketch of these checks follows
+% after the plotting code below.
+
+% choose a voxel to work with
+vox = vox_good;
+vox_mean = vox_good_mean;
+
+t = 1:size(vox,1);
+ntrials = size(vox,2);
+
+figure
+hold on
+plot(t, vox)
+plot(t, vox_mean, 'LineWidth', 2)
+
+vox_noise = vox - repmat(vox_mean, 1, ntrials);
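+
+% A minimal sketch of the rest of this assignment, reusing pwelch and histfit
+% as in the spike analysis above. The variable names (fs_bold, vox_noise_all,
+% Pnoise_vox, f_vox) and the window/overlap/nfft values are illustrative
+% choices for a ~486-sample, 1 Hz signal, not tutorial defaults.
+fs_bold = 1;                    % 1 Hz BOLD sampling rate (also used below)
+vox_noise_all = vox_noise(:);   % pool the noise across trials
+[Pnoise_vox, f_vox] = pwelch(vox_noise_all, 100, 50, 100, fs_bold);
+figure
+plot(f_vox, 10*log10(Pnoise_vox), 'r')   % is the noise white?
+xlabel('Frequency (Hz)')
+ylabel('Power (dB)')
+figure
+histfit(vox_noise_all)                   % is its amplitude distribution normal?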
+
+
+%% we're going to make a copy of the average response and corrupt
+% it with Gaussian noise, pretending it's a response that comes from
+% some model
+noiseGain = 1; %play with gain to increase or decrease the corruption of the response
+noise = randn(size(vox_good_mean)) * noiseGain; %make some noise!
+vox_good_meanNoisy = vox_good_mean + noise; %corrupt response
+
+%% finally, we're going to compute the upper bound on coherence
+% for the voxel itself, as well as the coherence between the noise-
+% corrupted response and the actual response
+
+infoFreqCutoff = -1; %max frequency in Hz to compute coherence for
+infoWindowSize = 50; %window size in seconds to compute coherence FFT. The default in compute_coherence_full is 500 ms.
+numStimPresentations = 10;
+fmri_sampling_rate = 1; % This BOLD signal has a 1 Hz sampling rate
+
+[cBound, cModel] = compute_coherence_full(vox_good_meanNoisy, vox_good_mean, vox_good_even,...
+ vox_good_odd, fmri_sampling_rate, numStimPresentations,...
+ infoFreqCutoff, infoWindowSize);
+
+performanceRatio = cModel.info / cBound.info; %how well did the noise-corrupted response do?
+
+%% now we'll make some plots of the coherence values, solid lines
+% are the upper bound, dashed lines are the noise-corrupted response
+figure; hold on;
+plot(cBound.f, cBound.c, 'k-', 'LineWidth', 2);
+plot(cBound.f, cBound.cUpper, 'b-', 'LineWidth', 2);
+plot(cBound.f, cBound.cLower, 'r-', 'LineWidth', 2);
+
+plot(cModel.f, cModel.c, 'k--', 'LineWidth', 2);
+plot(cModel.f, cModel.cUpper, 'b--', 'LineWidth', 2);
+plot(cModel.f, cModel.cLower, 'r--', 'LineWidth', 2);
+xlabel('Frequency (Hz)');
+ylabel('Coherence');
+theTitle = sprintf('Info=%0.2f bits/s out of %0.2f bits/s | Ratio=%0.2f', ...
+ cModel.info, cBound.info, performanceRatio);
+title(theTitle);
+axis([min(cBound.f), max(cBound.f), 0, 1]);
+
+
+%% Now we're going to compute coherence with a response that does come from a model
+% of visual cortex created by the Gallant lab. The model predictions are the variables
+% that end in "_pred", i.e. vox_good_pred
+
+infoFreqCutoff = -1; %max frequency in Hz to compute coherence for
+infoWindowSize = 50; %window size in seconds to compute coherence FFT. The default in compute_coherence_full is 500 ms.
+numStimPresentations = 10;
+fmri_sampling_rate = 1; % This BOLD signal has a 1 Hz sampling rate
+
+[cBound, cModel] = compute_coherence_full(vox_good_pred, vox_good_mean, vox_good_even,...
+ vox_good_odd, fmri_sampling_rate, numStimPresentations,...
+ infoFreqCutoff, infoWindowSize);
+
+performanceRatio = cModel.info / cBound.info; %how well did the model prediction do?
+
+%% now we'll make some plots of the coherence values, solid lines
+% are the upper bound, dashed lines are the model prediction
+figure; hold on;
+plot(cBound.f, cBound.c, 'k-', 'LineWidth', 2);
+plot(cBound.f, cBound.cUpper, 'b-', 'LineWidth', 2);
+plot(cBound.f, cBound.cLower, 'r-', 'LineWidth', 2);
+
+plot(cModel.f, cModel.c, 'k--', 'LineWidth', 2);
+plot(cModel.f, cModel.cUpper, 'b--', 'LineWidth', 2);
+plot(cModel.f, cModel.cLower, 'r--', 'LineWidth', 2);
+xlabel('Frequency (Hz)');
+ylabel('Coherence');
+theTitle = sprintf('Info=%0.2f bits/s out of %0.2f bits/s | Ratio=%0.2f', ...
+ cModel.info, cBound.info, performanceRatio);
+title(theTitle);
+axis([min(cBound.f), max(cBound.f), 0, 1]);
+
470 users/Rachel/LGN_Code/coherence_analysis/preprocessing/.svn/entries
@@ -0,0 +1,470 @@