diff --git a/Makefile b/Makefile
index 9057f37..a650ea7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all clean coverage test
+.PHONY: all clean coverage test verbose

all: clean

@@ -6,10 +6,10 @@
 clean:
	find . -name "*.so" -o -name "*.pyc" -o -name "*.pyx.md5" | xargs rm -f

 coverage:
-	nosetests code/utils data --with-coverage --cover-package=data --cover-package=utils
+	nosetests code/utils/tests data/tests --with-coverage --cover-package=data --cover-package=utils.functions

 test:
-	nosetests code/utils data
+	nosetests code/utils/tests data/tests

 verbose:
-	nosetests -v code/utils data
+	nosetests -v code/utils/tests data/tests
diff --git a/README.md b/README.md
index 4675049..8420983 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,13 @@
-# project-template
+# UC Berkeley Stat 159/259 Fall 2015
+## Project-Theta
+
 [![Build Status](https://travis-ci.org/berkeley-stat159/project-theta.svg?branch=master)](https://travis-ci.org/berkeley-stat159/project-theta?branch=master)
 [![Coverage Status](https://coveralls.io/repos/berkeley-stat159/project-theta/badge.svg?branch=master)](https://coveralls.io/r/berkeley-stat159/project-theta?branch=master)

-Fall 2015 group project
+_**Group members:**_ Siyao Chang ([`changsiyao`](https://github.com/changsiyao)), Boying Gong ([`boyinggong`](https://github.com/boyinggong)), Benjamin Hsieh ([`BenjaminHsieh`](https://github.com/BenjaminHsieh)), Brian Qiu ([`brianqiu`](https://github.com/brianqiu)), Jiang Zhu ([`pigriver123`](https://github.com/pigriver123))
+
+_**Topic:**_ [The Neural Basis of Loss Aversion in Decision Making Under Risk](https://openfmri.org/dataset/ds000005/)
+The paper studies loss aversion: 16 subjects were each offered 256 gambles, a 50/50 chance at a given dollar gain or loss, while their accept/reject decisions (behavioral data) and their brain activity (4-D BOLD fMRI data) were recorded.
+
+## Instructions
+[NEED TO ADD]
\ No newline at end of file
diff --git a/code/Makefile b/code/Makefile
index 8344e48..d4e3419 100644
--- a/code/Makefile
+++ b/code/Makefile
@@ -1,5 +1,7 @@
+.PHONY: test coverage
+
 test:
-	nosetests code/utils -w ..
+	nosetests code/utils/tests -w ..

 coverage:
-	nosetests code/utils -w .. --with-coverage --cover-package=utils
+	nosetests code/utils/tests -w .. --with-coverage --cover-package=utils.functions
diff --git a/code/scripts/find_mask_threshold.py b/code/scripts/find_mask_threshold.py
index d834a66..0256ba4 100644
--- a/code/scripts/find_mask_threshold.py
+++ b/code/scripts/find_mask_threshold.py
@@ -2,7 +2,7 @@
 import matplotlib.pyplot as plt
 import numpy as np
 import sys
-sys.path.append('../utils')
+sys.path.append('../utils/functions')
 import smooth_gaussian
 import os
diff --git a/code/scripts/findoutlier.py b/code/scripts/findoutlier.py
index e29733a..ad08428 100644
--- a/code/scripts/findoutlier.py
+++ b/code/scripts/findoutlier.py
@@ -1,7 +1,7 @@
 import numpy as np
 import json
 import sys
-pathtofunction = '../utils'
+pathtofunction = '../utils/functions'
 # Append the sys path
 sys.path.append(pathtofunction)
 from outlierfunction import outlier
diff --git a/code/scripts/graph_lindiag_script.py b/code/scripts/graph_lindiag_script.py
index e3fed91..1e461fd 100644
--- a/code/scripts/graph_lindiag_script.py
+++ b/code/scripts/graph_lindiag_script.py
@@ -4,7 +4,7 @@
 import sys

 # Path to function
-pathtofunction = '../utils'
+pathtofunction = '../utils/functions'
 # Append path to sys
 sys.path.append(pathtofunction)
diff --git a/code/scripts/graphoutlier.py b/code/scripts/graphoutlier.py
index 787fbdf..38c695f 100644
--- a/code/scripts/graphoutlier.py
+++ b/code/scripts/graphoutlier.py
@@ -9,11 +9,17 @@
 # Paths
 pathtodata = '../../data/'
 pathtofig = '../../paper/figures/'
-pathtofunction = '../utils'
+pathtofunction = '../utils/functions'
+pathtographing = '../utils/graphing'

 # Append function path
 sys.path.append(pathtofunction)
 # Import function
-from graphoutlier_functions import loadnib_dict, loadtxt_dict, plot_dvars, plot_fd, plot_meanSig
+from basic_util import loadnib_dict, loadtxt_dict
+
+# Append graphing path
+sys.path.append(pathtographing)
+# Import function
+from graphoutlier_functions import plot_dvars, plot_fd, plot_meanSig

 # load outlier files
diff --git a/code/scripts/lme_script.py b/code/scripts/lme_script.py
index b5bb01f..0fbd080 100644
--- a/code/scripts/lme_script.py
+++ b/code/scripts/lme_script.py
@@ -5,12 +5,12 @@
 import sys

 # Path to function
-pathtofunction = '../utils'
+pathtofunction = '../utils/functions'
 # Append path to sys
 sys.path.append(pathtofunction)

 from behavtask_tr import events2neural_extend, merge_cond
-from regression_functions import hrf, getRegressor, calcBeta, calcMRSS, deleteOutliers
+from regression_functions import hrf, getRegressor, deleteOutliers
 from lme_functions import calcBetaLme, calcSigProp, calcAnov, anovStat

 n_vols=240
diff --git a/code/scripts/logistic.py b/code/scripts/logistic.py
index 7669426..4704aac 100644
--- a/code/scripts/logistic.py
+++ b/code/scripts/logistic.py
@@ -5,7 +5,7 @@
 import sys

 # Path to function
-pathtofunction = '../utils'
+pathtofunction = '../utils/functions'
 # Append path to sys
 sys.path.append(pathtofunction)

@@ -49,7 +49,16 @@
     logreg_proba = logreg.predict_proba(X)
     confusion = create_confusion(logreg_proba, y)
     addsub = fig.add_subplot(4, 4, i)
-    addsub, AUC = plot_roc(confusion, addsub, i)
+    addsub, ROC, AUC = plot_roc(confusion, addsub)
+    # Plot the ROC curve.
+    plt.plot(ROC[:,0], ROC[:,1], lw=2)
+    plt.xlim(-0.1,1.1)
+    plt.ylim(-0.1,1.1)
+    plt.xlabel('$FPR(t)$')
+    plt.ylabel('$TPR(t)$')
+    plt.grid()
+    plt.title('subject '+ str(i)+', AUC = %.4f'%AUC)
+    #------------------------------------------------------------------------#
     Min_thrs = np.append(Min_thrs, getMin_thrs(confusion))
     AUC_smr = np.append(AUC_smr, AUC)
diff --git a/code/scripts/make_smooth_graphs.py b/code/scripts/make_smooth_graphs.py
index cbd844b..877216a 100644
--- a/code/scripts/make_smooth_graphs.py
+++ b/code/scripts/make_smooth_graphs.py
@@ -2,7 +2,7 @@
 import matplotlib.pyplot as plt
 import nibabel as nib
 import sys
-sys.path.append('../utils')
+sys.path.append('../utils/functions')
 import smooth_gaussian

 #possibly put in os function to specify path later
diff --git a/code/scripts/regression_script.py b/code/scripts/regression_script.py
index b12d466..949f621 100644
--- a/code/scripts/regression_script.py
+++ b/code/scripts/regression_script.py
@@ -5,7 +5,7 @@
 import sys

 # Path to function
-pathtofunction = '../utils'
+pathtofunction = '../utils/functions'
 # Append path to sys
 sys.path.append(pathtofunction)
diff --git a/code/utils/functions/basic_util.py b/code/utils/functions/basic_util.py
new file mode 100644
index 0000000..01edebd
--- /dev/null
+++ b/code/utils/functions/basic_util.py
@@ -0,0 +1,53 @@
+"""
+A collection of basic utility functions for use in outlier detection/graphing
+of fd, dvars, and meanSignal; see graphoutlier_functions.py
+"""
+
+import numpy as np
+import nibabel as nib
+
+
+def loadtxt_dict(file_name, fig_name):
+    """
+    Input:
+        Txt file specified by file_name
+        Name of the figure file we wish to output
+
+    Output:
+        Dictionary mapping fig_name to the np array loaded from file_name
+    """
+    data = np.loadtxt(file_name)
+    dict_out = {fig_name: data}
+    return dict_out
+
+def loadnib_dict(file_name, fig_name):
+    """
+    Input:
+        Bold file (bold.nii, bold.nii.gz) specified by file_name
+        Name of the figure file we wish to output
+
+    Output:
+        Dictionary mapping fig_name to the np array loaded from file_name
+    """
+    img = nib.load(file_name)
+    data = img.get_data()
+    dict_out = {fig_name: data}
+    return dict_out
+
+
+# Calculate mean
+def vol_mean(data):
+    """ Return mean across voxels for 4D `data`
+
+    Input:
+        np array of data
+    Output: np array of dim (T,)
+        mean of data across all but the last dimension
+    """
+    mean_list = []
+    # Loop over each volume and record its mean
+    for i in range(data.shape[-1]):
+        mean = np.mean(data[...,i])
+        mean_list.append(mean)
+    return np.asarray(mean_list)
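Aside, not part of the patch: a quick sanity check of the new `basic_util` helpers. The expected `vol_mean` values are the same ones asserted in `test_basic_util.py` further down; the `sys.path` line assumes the snippet is run from the project root.

```python
import numpy as np
import sys
sys.path.append('code/utils/functions')  # assumes we run from the project root
from basic_util import loadtxt_dict, vol_mean

# vol_mean averages over everything except the last (time) axis
data = np.arange(64).reshape((2, 4, 2, 4))  # 4D toy "BOLD" array, T = 4
assert np.allclose(vol_mean(data), [30., 31., 32., 33.])

# loadtxt_dict wraps a text file in a {figure_name: array} dict
np.savetxt('temp.txt', np.arange(30))
d = loadtxt_dict('temp.txt', 'myfig')
assert list(d.keys()) == ['myfig'] and d['myfig'].shape == (30,)
```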
diff --git a/code/utils/behavtask_tr.py b/code/utils/functions/behavtask_tr.py
similarity index 92%
rename from code/utils/behavtask_tr.py
rename to code/utils/functions/behavtask_tr.py
index 811ff24..73386f0 100644
--- a/code/utils/behavtask_tr.py
+++ b/code/utils/functions/behavtask_tr.py
@@ -70,10 +70,3 @@ def events2neural_extend(behav_task, tr, n_trs):
         time_course[on:on + dur,9] = RT
     return time_course

-def plot_time_course(time_course):
-    """
-    Simple function to plot time_course, an array from return of
-    events2neural_extend
-    """
-    plt.plot(time_course[:,0])
-    plt.show()
diff --git a/code/utils/lme_functions.py b/code/utils/functions/lme_functions.py
similarity index 96%
rename from code/utils/lme_functions.py
rename to code/utils/functions/lme_functions.py
index 87053d5..a09daa4 100644
--- a/code/utils/lme_functions.py
+++ b/code/utils/functions/lme_functions.py
@@ -3,8 +3,6 @@
 from scipy import stats
 import pandas as pd
 import numpy as np
-from behavtask_tr import events2neural_extend, merge_cond
-from regression_functions import hrf, getRegressor, calcBeta, calcMRSS, deleteOutliers

 def calcBetaLme(data_full, gain_full, loss_full, linear_full, quad_full, run_group, thrshd=None):
     """
diff --git a/code/utils/logistic_function.py b/code/utils/functions/logistic_function.py
similarity index 85%
rename from code/utils/logistic_function.py
rename to code/utils/functions/logistic_function.py
index 00f8df1..e9fbe50 100644
--- a/code/utils/logistic_function.py
+++ b/code/utils/functions/logistic_function.py
@@ -8,8 +8,8 @@ def create_confusion(logreg_proba, y, thrs_inc=0.01):

     Parameters
     ----------
-    actual: Actual responses, 1-d array with values 0 or 1
-    fitted: Fitted probabilities, 1-d array with values between 0 and 1
+    y: Actual responses, 1-d array with values 0 or 1
+    logreg_proba: Fitted probabilities, 2-d array with values between 0 and 1 in second column
     thrs_inc: increment of threshold probability (default 0.01)

     Returns
@@ -52,7 +52,7 @@ def getMin_thrs(confusion):
     false_pos: number of incorrect trues
     false_neg: number of incorrect falses
     """
-    thrs_min = np.argmin(confusion[:,3]+ confusion[:,4])
+    thrs_min = np.argmin(confusion[:,3] + confusion[:,4])
     col_out = confusion[thrs_min, :]
     thrs = col_out[0]
     false_pos = col_out[3]
@@ -60,7 +60,7 @@ def getMin_thrs(confusion):
     return thrs, false_pos, false_neg


-def plot_roc(confusion, fig, sub_i):
+def plot_roc(confusion, fig):
     """
     function to plot the ROC (receiver operating characteristic) curve and
     calculate the corresponding AUC (Area Under Curve).
@@ -83,19 +83,10 @@
         # Compute true positive rate for current threshold.
         TPR_t = confusion[i, 1] / float(confusion[i, 1] + confusion[i, 4])
         ROC[i,1] = TPR_t
-
-    # Plot the ROC curve.
-    plt.plot(ROC[:,0], ROC[:,1], lw=2)
-    plt.xlim(-0.1,1.1)
-    plt.ylim(-0.1,1.1)
-    plt.xlabel('$FPR(t)$')
-    plt.ylabel('$TPR(t)$')
-    plt.grid()

     AUC = 0.
     for i in range(confusion.shape[0]-1):
         AUC += (ROC[i+1,0]-ROC[i,0]) * (ROC[i+1,1]+ROC[i,1])
     AUC *= -0.5
-    plt.title('subject '+ str(sub_i)+', AUC = %.4f'%AUC)
-    return fig, AUC
+    return fig, ROC, AUC
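Aside, not part of the patch: the AUC accumulation that `plot_roc` keeps is just a trapezoidal rule with a sign flip, since FPR falls as the threshold rises. A self-contained check using only numpy:

```python
import numpy as np

def auc_loop(ROC):
    # Mirrors the accumulation in plot_roc: signed trapezoid areas, flipped
    AUC = 0.
    for i in range(ROC.shape[0] - 1):
        AUC += (ROC[i+1, 0] - ROC[i, 0]) * (ROC[i+1, 1] + ROC[i, 1])
    return AUC * -0.5

# FPR (col 0) and TPR (col 1) both fall as the threshold sweeps upward
ROC = np.array([[1.0, 1.0], [0.5, 0.9], [0.2, 0.6], [0.0, 0.0]])
assert np.isclose(auc_loop(ROC), -np.trapz(ROC[:, 1], ROC[:, 0]))  # = 0.76
```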
diff --git a/code/utils/outlierfunction.py b/code/utils/functions/outlierfunction.py
similarity index 100%
rename from code/utils/outlierfunction.py
rename to code/utils/functions/outlierfunction.py
diff --git a/code/utils/pearson.py b/code/utils/functions/pearson.py
similarity index 100%
rename from code/utils/pearson.py
rename to code/utils/functions/pearson.py
diff --git a/code/utils/regression_functions.py b/code/utils/functions/regression_functions.py
similarity index 97%
rename from code/utils/regression_functions.py
rename to code/utils/functions/regression_functions.py
index 426599c..fc8ed5d 100644
--- a/code/utils/regression_functions.py
+++ b/code/utils/functions/regression_functions.py
@@ -65,8 +65,7 @@ def calcBeta(data, gain, loss, linear_dr, quad_dr, threshold=None):
     design[:, 3] = quad_dr
     designp = npl.pinv(design)
     if threshold!=None:
-        mean_data = np.mean(data, axis=-1)
-        mask = mean_data > threshold
+        mask = np.mean(data, axis=-1) > threshold
         data[~mask]=0
     T = data.shape[-1]
     time_by_vox = np.reshape(data, (-1, T)).T
diff --git a/code/utils/smooth_gaussian.py b/code/utils/functions/smooth_gaussian.py
similarity index 100%
rename from code/utils/smooth_gaussian.py
rename to code/utils/functions/smooth_gaussian.py
diff --git a/code/utils/graph_lindiagnostics.py b/code/utils/graphing/graph_lindiagnostics.py
similarity index 100%
rename from code/utils/graph_lindiagnostics.py
rename to code/utils/graphing/graph_lindiagnostics.py
diff --git a/code/utils/graphoutlier_functions.py b/code/utils/graphing/graphoutlier_functions.py
similarity index 81%
rename from code/utils/graphoutlier_functions.py
rename to code/utils/graphing/graphoutlier_functions.py
index cc91e25..706eec7 100644
--- a/code/utils/graphoutlier_functions.py
+++ b/code/utils/graphing/graphoutlier_functions.py
@@ -1,38 +1,28 @@
 """
-A collection of utility functions for outlier detection/graphing
+A collection of graphing functions for outlier detection/graphing
 of fd, dvars, and meanSignal
 """

 import numpy as np
 import matplotlib.pyplot as plt
-import nibabel as nib

-def loadtxt_dict(file_name, fig_name):
-    """
-    Input:
-        Txt file specified by file_name
-        Name of figure file wish to output
+# Calculate mean: same as vol_mean in basic_util.py. Its test lives there;
+# it is duplicated here because the graphing functions below are untested
+# but still need it locally:
+def vol_mean(data):
+    """ Return mean across voxels for 4D `data`

-    Output:
-        Dictonary that attached to file_name the np array after loading
-        file_name
+    Input:
+        np array of data
+    Output: np array of dim (T,)
+        mean of data across all but the last dimension
     """
-    data = np.loadtxt(file_name)
-    dict_out = {fig_name: data}
-    return dict_out
+    mean_list = []
+    # Loop over each volume and record its mean
+    for i in range(data.shape[-1]):
+        mean = np.mean(data[...,i])
+        mean_list.append(mean)
+    return np.asarray(mean_list)

-def loadnib_dict(file_name, fig_name):
-    """
-    Input:
-        Bold file (bold.nii, bold.nii.gz) specified by file_name
-        Name of figure file wish to output
-
-    Output:
-        Dictonary that attached to file_name the np array after loading file_name
-    """
-    img = nib.load(file_name)
-    data = img.get_data()
-    dict_out = {fig_name: data}
-    return dict_out

 # Graphing dvars: RMS signal derivative
 def plot_dvars(dvars_dict, dvars_outliers,
saveit=False): @@ -64,6 +54,7 @@ def plot_dvars(dvars_dict, dvars_outliers, saveit=False): plt.savefig(dvars_dict.keys()[0]) plt.close() + # Graphing fd: Framewise displacement def plot_fd(fd_dict, fd_outliers, saveit=False): """ @@ -94,21 +85,6 @@ def plot_fd(fd_dict, fd_outliers, saveit=False): plt.savefig(fd_dict.keys()[0]) plt.close() -# Calculate mean -def vol_mean(data): - """ Return mean across voxels for $D `data` - - Input: - np array of data - Output: np array of dim (T,) - mean of data across all but the last dimension - """ - mean_list = [] - # Loop over the each volume and outputs the mean of each dimension - for i in range(data.shape[-1]): - mean = np.mean(data[...,i]) - mean_list.append(mean) - return np.asarray(mean_list) # Graphing mean signal def plot_meanSig(bdata_dict, saveit = False): @@ -141,4 +117,3 @@ def plot_meanSig(bdata_dict, saveit = False): plt.savefig(bdata_dict.keys()[0]) plt.close() - diff --git a/code/utils/stimuli.py b/code/utils/stimuli.py deleted file mode 100644 index 244607c..0000000 --- a/code/utils/stimuli.py +++ /dev/null @@ -1,37 +0,0 @@ -""" Functions to work with standard OpenFMRI stimulus files - -The functions have docstrings according to the numpy docstring standard - see: - - https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt -""" - -import numpy as np - -def events2neural(task_fname, tr, n_trs): - """ Return predicted neural time course from event file `task_fname` - - Parameters - ---------- - task_fname : str - Filename of event file - tr : float - TR in seconds - n_trs : int - Number of TRs in functional run - - Returns - ------- - time_course : array shape (n_trs,) - Predicted neural time course, one value per TR - """ - task = np.loadtxt(task_fname) - # Check that the file is plausibly a task file - if task.ndim != 2 or task.shape[1] != 3: - raise ValueError("Is {0} really a task file?", task_fname) - # Convert onset, duration seconds to TRs - task[:, :2] = task[:, :2] / tr - # Neural time course from onset, duration, amplitude for each event - time_course = np.zeros(n_trs) - for onset, duration, amplitude in task: - time_course[onset:onset + duration] = amplitude - return time_course diff --git a/code/utils/tests/test_graphoutlier_functions.py b/code/utils/tests/test_basic_util.py similarity index 52% rename from code/utils/tests/test_graphoutlier_functions.py rename to code/utils/tests/test_basic_util.py index 2f597dd..7a537c6 100644 --- a/code/utils/tests/test_graphoutlier_functions.py +++ b/code/utils/tests/test_basic_util.py @@ -1,31 +1,32 @@ """ -Test graph_function module the following functions: +Test basic_util module the following functions: loadtxt_dict loadnib_dict vol_mean Run with:: - - nosetests test_graph_functions.py + `make test` from code or project directory """ # Loading modules. 
 from __future__ import absolute_import, division, print_function
 import numpy as np
-import os
-import sys
+import sys, os
 from numpy.testing import assert_array_equal
+import nibabel as nib

 # Set path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))

 # Load graph_functions:
-from graphoutlier_functions import loadtxt_dict, vol_mean
+from basic_util import loadtxt_dict, loadnib_dict, vol_mean
+

 # Test txt:
 np.savetxt('temp.txt', range(30))
 # Test np array, dim = 4
 # x1 = np.random.randn(100).reshape((2, 5, 2, 5))
-x2 = np.arange(64).reshape((2, 4, 2, 4))
+x2 = np.arange(64).reshape((2, 4, 2, 4))
+

 def test_loadtxt_dict():
     # Using loadtxt_dict
     mydict = loadtxt_dict('temp.txt', 'mytxt')
@@ -38,3 +39,14 @@ def test_vol_mean():
     mymean = vol_mean(x2)
     truemean = np.array([30., 31 ,32 ,33])
     assert_array_equal(mymean, truemean)
+
+def test_loadnib_dict():
+    # create test img
+    t_data = np.array([1,2,3])
+    t_dict = {'testnib': t_data}
+    img = nib.Nifti1Image(t_data, np.eye(4))
+    nib.save(img,'testnib.nii.gz')
+    # my function
+    my_dict = loadnib_dict('testnib.nii.gz', 'testnib')
+    assert (list(t_dict.keys())[0] == list(my_dict.keys())[0])
+    assert_array_equal(list(t_dict.values())[0], list(my_dict.values())[0])
diff --git a/code/utils/tests/test_behavtask_tr.py b/code/utils/tests/test_behavtask_tr.py
new file mode 100644
index 0000000..902a4a6
--- /dev/null
+++ b/code/utils/tests/test_behavtask_tr.py
@@ -0,0 +1,55 @@
+"""
+Test behavtask_tr module the following functions:
+    merge_cond
+    events2neural_extend
+
+Run with::
+    `make test` from the project-theta directory
+
+"""
+# Loading modules.
+from __future__ import absolute_import, division, print_function
+import numpy as np
+import os
+import sys
+from numpy.testing import assert_allclose
+
+
+# Set path
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../functions')))
+
+# Path to the test data (from the first subject, first run):
+pathtotest = 'code/utils/tests/'
+
+# Load graph_functions:
+from behavtask_tr import merge_cond, events2neural_extend
+
+def test_mergecond():
+    # my function
+    my_merge = merge_cond(pathtotest+'test_behavdata.txt', pathtotest+'test_cond001.txt', pathtotest+'test_cond002.txt', pathtotest+'test_cond003.txt', pathtotest+'test_cond004.txt')
+    t_behav = np.loadtxt(pathtotest+'test_behavdata.txt', skiprows=1)
+    t_con1 = np.loadtxt(pathtotest+'test_cond001.txt')
+    t_con2 = np.loadtxt(pathtotest+'test_cond002.txt')
+    t_con3 = np.loadtxt(pathtotest+'test_cond003.txt')
+    t_con4 = np.loadtxt(pathtotest+'test_cond004.txt')
+
+    # assert
+    assert_allclose(t_behav[:,1:], my_merge[:,6:])
+    assert_allclose(t_con1, my_merge[:,:3])
+    assert_allclose(t_con2[:,-1], my_merge[:,3])
+    assert_allclose(t_con3[:,-1], my_merge[:,4])
+    assert_allclose(t_con4[:,-1], my_merge[:,5])
+
+def test_events2neural_extend():
+    t_behav = np.array([np.arange(12)+1, np.arange(12)+2])
+    tr = 2
+    n_tr = 2
+    lame = np.array([1])
+    try:
+        events2neural_extend(lame, tr, n_tr)
+        # Should not reach here: a 1-d input must raise ValueError
+        assert False
+    except ValueError:
+        assert(True)
+    t_time = np.array([np.arange(3,13), np.arange(4,14)])
+    my_time = events2neural_extend(t_behav, tr, n_tr)
+    assert_allclose(t_time, my_time)
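Aside, not part of the patch: the try/except in `test_events2neural_extend` above now fails when no exception is raised, but nose also ships `assert_raises`, which does the same bookkeeping in one line. A sketch, assuming `behavtask_tr` is importable as in the test:

```python
from nose.tools import assert_raises  # re-exported unittest helper
import numpy as np
from behavtask_tr import events2neural_extend

# Fails the test unless events2neural_extend raises ValueError on 1-d input
assert_raises(ValueError, events2neural_extend, np.array([1]), 2, 2)
```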
diff --git a/code/utils/tests/test_lme_functions.py b/code/utils/tests/test_lme_functions.py
index 0b4d19f..afea34f 100644
--- a/code/utils/tests/test_lme_functions.py
+++ b/code/utils/tests/test_lme_functions.py
@@ -12,14 +12,14 @@
 # Loading modules.
 from __future__ import absolute_import, division, print_function
 import numpy as np
-import sys
+import sys, os
 from scipy import stats
 from sklearn import linear_model
 from numpy.testing import assert_almost_equal, assert_allclose

 # Append function path
-sys.path.append('..')
+sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))

 # Path to the first subject, first run, this is used as the test data for
 # getGainLoss:
@@ -28,8 +28,6 @@
 # Load graph_functions:
 from lme_functions import calcBetaLme, calcSigProp, calcAnov, anovStat

-
-
 def test_calcBetaLme():
     # Test data with large n = 2000
     X = np.ones((2000, 4))
@@ -45,9 +43,16 @@
     test_betas = regr.coef_
     # My function, should produce same results if groups are all the same:
     lme = calcBetaLme(Y, X[:,0], X[:,1], X[:,2], X[:,3], np.repeat(1,2000))
+    lme_thrs = calcBetaLme(Y, X[:,0], X[:,1], X[:,2], X[:,3], np.repeat(1,2000), -40000)
+    lme_thrs1 = calcBetaLme(Y, X[:,0], X[:,1], X[:,2], X[:,3], np.repeat(1,2000), 10)
     # Compare betas
     my_betas = lme.ravel()[[0,2]]
+    my_betas_thrs = lme_thrs.ravel()[[0,2]]
+    my_betas_thrs1 = lme_thrs1.ravel()[[0,2]]
     assert max(abs(my_betas - test_betas[:2])) < 0.005
+    assert max(abs(my_betas_thrs - test_betas[:2])) < 0.005
+    assert (my_betas_thrs1 != test_betas[:2]).any()
+

 def test_calcSigProp():
     # Set up test betas
@@ -104,9 +109,13 @@ def ANOVA(G):
     test_anova = ANOVA(groups)
     # My function
     my_anova = calcAnov(t_data, run_group).ravel()
-
+    my_anova_thrs = calcAnov(t_data, run_group, -40000).ravel()
+    my_anova_thrs1 = calcAnov(t_data, run_group, 10).ravel()
     # Assert
     assert_allclose(test_anova, my_anova)
+    assert_allclose(test_anova, my_anova_thrs)
+    assert (test_anova != my_anova_thrs1).any()
+

 def test_anovStat():
     # create a test dataset
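Aside, not part of the patch: the `thrshd` argument exercised above follows the masking idiom visible in the `calcBeta` hunk earlier, where voxels whose mean signal does not exceed the threshold are zeroed before fitting. A standalone illustration:

```python
import numpy as np

# Two "voxels" over T = 4 time points: one strong, one near zero
data = np.array([[100., 110., 90., 105.],
                 [0.1, 0.2, 0.1, 0.2]])
threshold = 10

# Same idiom as calcBeta: mask on the mean over time, zero the rest
mask = np.mean(data, axis=-1) > threshold
data[~mask] = 0
assert np.all(data[1] == 0) and np.all(data[0] != 0)
```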
diff --git a/code/utils/tests/test_logistic_functions.py b/code/utils/tests/test_logistic_functions.py
new file mode 100644
index 0000000..9e15e02
--- /dev/null
+++ b/code/utils/tests/test_logistic_functions.py
@@ -0,0 +1,81 @@
+"""
+Test logistic_function module the following functions:
+    create_confusion
+    getMin_thrs
+    plot_roc
+
+Run with::
+    `make test` from the project-theta or code directory
+
+"""
+# Loading modules.
+from __future__ import absolute_import, division, print_function
+import numpy as np
+import os
+import sys
+from numpy.testing import assert_allclose
+
+# Set path
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../functions')))
+
+# Load graph_functions:
+from logistic_function import create_confusion, getMin_thrs, plot_roc
+
+def test_create_confusion():
+    # Sample data
+    actual = np.array([0,1,1,0,0,1])
+    fittedin = np.array([[0,0,0,0,0,0],[0.2, 0.6, 0.7, 0.1, 0.3, 0.9]])
+    fitted = np.array([0.2, 0.6, 0.7, 0.1, 0.3, 0.9])
+    # thrs_inc = 0.2
+    thrs_array = np.array([0, 0.2, 0.4, 0.6, 0.8, 1.0])
+    t_confusion = np.ones((len(thrs_array), 5))
+    t_confusion[:,0] = thrs_array
+    # thrs_inc = 0.2
+    for index, item in enumerate(thrs_array):
+        a = sum((actual == 1) & (fitted > item))
+        b = sum((actual == 0) & (fitted <= item))
+        c = sum((actual == 0) & (fitted > item))
+        d = sum((actual == 1) & (fitted <= item))
+        t_confusion[index, 1:5] = [a, b, c, d]
+    # my function
+    my_confusion = create_confusion(fittedin.T, actual, thrs_inc = 0.2)
+
+    # Assert
+    assert_allclose(t_confusion, my_confusion)
+
+def test_getMin_thrs():
+    # Test data
+    t_data = np.array([[0, 1, 2, 2, 3], [0.25, 1, 5, 1, 1],
+                       [0.3, 3, 3, 1, 1], [0.5, 2, 3, 1, 3],
+                       [1, 4, 1, 2, 1]])
+    # Should return the first row with the lowest sum of 4th and 5th column
+    # values: the 2nd row (index 1)
+    t_result = t_data[1, :]
+    t_result = np.array([t_result[0], t_result[3], t_result[4]])
+
+    my_thrs, my_fp, my_fn = getMin_thrs(t_data)
+    my_result = np.array([my_thrs, my_fp, my_fn])
+
+    # Assert
+    assert_allclose(t_result, my_result)
+
+def test_plot_roc():
+    # test data
+    t_data = np.array([[0, 1, 2, 2, 3], [0.25, 1, 5, 1, 1],
+                       [0.5, 2, 3, 1, 3],[1, 4, 1, 2, 1]])
+    fig = 111
+    # Calculating by hand: the roc matrix
+    # first column: 4th entry of each row / (4th + 3rd entries)
+    # second column: 2nd entry of each row / (2nd + 5th entries)
+    t_roc = np.array([[2/4, 1/4], [1/6, 1/2], [1/4, 2/5], [2/3, 4/5]])
+    dif1 = np.array([t_roc.T[0][i+1] - t_roc.T[0][i] for i in range(len(t_roc.T[0])-1)])
+    dif2 = np.array([t_roc.T[1][i+1] + t_roc.T[1][i] for i in range(len(t_roc.T[1])-1)])
+    t_AUC = sum(dif1*dif2)* (-0.5)
+
+    # my function
+    my_fig, my_ROC, my_AUC = plot_roc(t_data, fig)
+
+    # assert
+    assert (t_AUC == my_AUC)
+    assert (fig == my_fig)
+    assert_allclose(t_roc, my_ROC)
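Aside, not part of the patch: for readers decoding these tests, each confusion row is laid out as [threshold, TP, TN, FP, FN], so the ROC points can be computed in one vectorized step:

```python
import numpy as np

# Columns: threshold, TP, TN, FP, FN (the layout create_confusion produces)
confusion = np.array([[0.00, 1., 2., 2., 3.],
                      [0.25, 1., 5., 1., 1.],
                      [0.50, 2., 3., 1., 3.],
                      [1.00, 4., 1., 2., 1.]])

fpr = confusion[:, 3] / (confusion[:, 3] + confusion[:, 2])  # FP / (FP + TN)
tpr = confusion[:, 1] / (confusion[:, 1] + confusion[:, 4])  # TP / (TP + FN)
roc = np.column_stack([fpr, tpr])
# Matches the hand-computed t_roc in test_plot_roc
assert np.allclose(roc, [[2/4, 1/4], [1/6, 1/2], [1/4, 2/5], [2/3, 4/5]])
```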
diff --git a/code/utils/tests/test_outlierfunction.py b/code/utils/tests/test_outlierfunction.py
index 03c657c..e544153 100644
--- a/code/utils/tests/test_outlierfunction.py
+++ b/code/utils/tests/test_outlierfunction.py
@@ -17,7 +17,7 @@
 import sys

 # Set path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../functions')))

 # Load function
diff --git a/code/utils/tests/test_pearson_1d.py b/code/utils/tests/test_pearson_1d.py
index 4eeb384..b87a0bc 100644
--- a/code/utils/tests/test_pearson_1d.py
+++ b/code/utils/tests/test_pearson_1d.py
@@ -28,7 +28,12 @@

 import numpy as np

-from .. import pearson
+import os, sys
+
+# Add path
+sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
+
+import pearson

 from numpy.testing import assert_almost_equal
diff --git a/code/utils/tests/test_pearson_2d.py b/code/utils/tests/test_pearson_2d.py
index 05e4cce..dcb5f87 100644
--- a/code/utils/tests/test_pearson_2d.py
+++ b/code/utils/tests/test_pearson_2d.py
@@ -10,7 +10,11 @@

 import numpy as np

-from .. import pearson
+import os, sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
+
+import pearson

 from numpy.testing import assert_almost_equal
diff --git a/code/utils/tests/test_regression_functions.py b/code/utils/tests/test_regression_functions.py
index 3afb5f6..0ebf91b 100644
--- a/code/utils/tests/test_regression_functions.py
+++ b/code/utils/tests/test_regression_functions.py
@@ -21,7 +21,7 @@

 # Set path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../functions')))

 # Path to the first subject, first run, this is used as the test data for
 # getGainLoss:
@@ -179,6 +179,17 @@ def test_calcBeta():
     assert_allclose(t_by_v.ravel(), regr.predict(X))
     # assert design
     assert_allclose(X, design[:,:4])
+
+    #--------------------------------------------------------------------------#
+    Y = X[:,0] + X[:,1]*2 + X[:,2] + X[:,3] + 1
+    # my function when thrs != None
+    design1, t_by_v1, my_beta1 = calcBeta(Y, X[:,0], X[:,1], X[:,2], X[:,3], 1)
+    # assert the threshold values produce different betas and tbyv
+    assert (t_by_v.ravel() != t_by_v1.ravel()).any()
+    assert (my_beta.ravel() != my_beta1.ravel()).all()
+    # assert design still the same
+    assert_allclose(X, design1[:,:4])
+

 def test_calcMRSS():
     # Like above, create a test matrix of regressors
diff --git a/code/utils/tests/test_smoothing.py b/code/utils/tests/test_smoothing.py
index fc43884..ed834ed 100644
--- a/code/utils/tests/test_smoothing.py
+++ b/code/utils/tests/test_smoothing.py
@@ -15,7 +15,7 @@
 from numpy.testing import assert_allclose

 # Append function path
-sys.path.append('..')
+sys.path.append('../functions')

 # Path to the first subject, first run, this is used as the test data for
 # getGainLoss:
diff --git a/data/Makefile b/data/Makefile
index a2d17fa..feb0a1e 100644
--- a/data/Makefile
+++ b/data/Makefile
@@ -1,8 +1,10 @@
+.PHONY: data validate unzip test
+
 data:
	wget http://openfmri.s3.amazonaws.com/tarballs/ds005_raw.tgz
	wget http://nipy.bic.berkeley.edu/rcsds/ds005_mnifunc.tar
	wget http://nipy.bic.berkeley.edu/rcsds/mni_icbm152_nlin_asym_09c_2mm.tar.gz
-
+
 validate:
	python data.py

@@ -15,3 +17,6 @@ unzip:
	rm mni_icbm152_nlin_asym_09c_2mm.tar.gz
	mv mni_icbm152_nlin_asym_09c_2mm templates
	mv templates/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii templates/mni_standard.nii
+
+test:
+	nosetests data/tests -w ..
diff --git a/data/README.md b/data/README.md
index 853e687..628b8b1 100644
--- a/data/README.md
+++ b/data/README.md
@@ -1,10 +1,11 @@
 The ds005 dataset, filtered ds005 dataset, and mni templates are stored here.
 The makefile is written such that:

-- 'make data' will pull in the appropriate data
-- 'make unzip' will unzip, remove, and rename certain files
-- 'make validate' will run data.py to check the hashes of each downloaded file
+- `make data` will pull in the appropriate data
+- `make unzip` will unzip, remove, and rename certain files
+- `make validate` will run data.py to check the hashes of each downloaded file
 with a master hashlist included, ensuring all downloaded data is correct
+- `make test` will run nosetests on the hash-checking function in data.py

 THE COMMANDS SHOULD BE DONE IN THIS ORDER to be successfully validated.
 The ds005 folder contains subfolders for each subject, the most relevant of which are:
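Aside, not part of the patch: data.py itself is not shown in this diff, but the README describes it as checking each download against a master hashlist. The core of such a check is a few lines of hashlib; the function name, chunk size, and md5 choice below are illustrative assumptions, not the actual data.py API:

```python
import hashlib

def check_hash(filename, expected_md5):
    # Stream the file in chunks so multi-GB downloads do not fill memory
    h = hashlib.md5()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest() == expected_md5

# Illustrative usage against a hypothetical master-hashlist entry:
# check_hash('ds005_raw.tgz', 'd41d8cd98f00b204e9800998ecf8427e')
```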
diff --git a/old_paper/.gitignore b/old_paper/.gitignore
deleted file mode 100644
index e69de29..0000000
diff --git a/old_paper/Makefile b/old_paper/Makefile
deleted file mode 100644
index 232a5dd..0000000
--- a/old_paper/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-TITLE="report"
-
-all:
-	pdflatex $(TITLE).tex
-	bibtex $(TITLE)
-	pdflatex $(TITLE).tex
-	pdflatex $(TITLE).tex
-
-
-clean:
-	rm -f *.{aux,log,bbl,lof,lot,blg,out}
diff --git a/old_paper/figures/dvars_sub1run1.png b/old_paper/figures/dvars_sub1run1.png
deleted file mode 100644
index e49afb4..0000000
Binary files a/old_paper/figures/dvars_sub1run1.png and /dev/null differ
diff --git a/old_paper/figures/dvars_sub9run1.png b/old_paper/figures/dvars_sub9run1.png
deleted file mode 100644
index a5c4e06..0000000
Binary files a/old_paper/figures/dvars_sub9run1.png and /dev/null differ
diff --git a/old_paper/figures/fd_sub1run1.png b/old_paper/figures/fd_sub1run1.png
deleted file mode 100644
index 6e192e8..0000000
Binary files a/old_paper/figures/fd_sub1run1.png and /dev/null differ
diff --git a/old_paper/figures/fd_sub9run1.png b/old_paper/figures/fd_sub9run1.png
deleted file mode 100644
index 9f8816d..0000000
Binary files a/old_paper/figures/fd_sub9run1.png and /dev/null differ
diff --git a/old_paper/figures/mean_sub1run1.png b/old_paper/figures/mean_sub1run1.png
deleted file mode 100644
index f5ed6af..0000000
Binary files a/old_paper/figures/mean_sub1run1.png and /dev/null differ
diff --git a/old_paper/figures/mean_sub9run1.png b/old_paper/figures/mean_sub9run1.png
deleted file mode 100644
index 1502b27..0000000
Binary files a/old_paper/figures/mean_sub9run1.png and /dev/null differ
diff --git a/old_paper/figures/qqplot.png b/old_paper/figures/qqplot.png
deleted file mode 100644
index 1f203e4..0000000
Binary files a/old_paper/figures/qqplot.png and /dev/null differ
diff --git a/old_paper/figures/res_fitted_log.png b/old_paper/figures/res_fitted_log.png
deleted file mode 100644
index da72d8f..0000000
Binary files a/old_paper/figures/res_fitted_log.png and /dev/null differ
diff --git a/old_paper/figures/res_fittedval.png b/old_paper/figures/res_fittedval.png
deleted file mode 100644
index 2aa43e5..0000000
Binary files a/old_paper/figures/res_fittedval.png and /dev/null differ
diff --git a/old_paper/project.bib b/old_paper/project.bib
deleted file mode 100644
index 79356c5..0000000
--- a/old_paper/project.bib
+++ /dev/null
@@ -1,9 +0,0 @@
-@article{Tom2007LossAversion,
-  title={The Neural Basis of Loss Aversion in Decision-Making Under Risk},
-  author={Tom, Sabrina M and others},
-  journal={Science},
-  volume={315},
-  pages={515--518},
-  year={2007},
-  publisher={American Association for the Advancement of Science}
-}
diff --git a/old_paper/report.pdf b/old_paper/report.pdf
deleted file mode 100644
index 1fc9fad..0000000
Binary files a/old_paper/report.pdf and /dev/null differ
diff --git a/old_paper/report.tex b/old_paper/report.tex
deleted file mode 100644
index 9c36ef7..0000000
--- a/old_paper/report.tex
+++ /dev/null
@@ -1,47 +0,0 @@
-\documentclass[11pt]{article}
-
-\usepackage[margin=0.75in]{geometry}
-\usepackage{indentfirst}
-\usepackage{graphicx}
-\usepackage{float}
-\bibliographystyle{siam}
-
-\title{The Neural Basis of Loss Aversion in Decision-Making Under Risk}
-\author{
-    Chang, Siyao \\
-    \texttt{changsiyao}
-    \and
-    Gong, Boying\\
-    \texttt{boyinggong}
-    \and
-    Hsieh, Benjamin\\
-    \texttt{BenjaminHsieh}
-    \and
-    Qiu, Brian\\
-
\texttt{brianqiu} - \and - Zhu, Jiang\\ - \texttt{pigriver123} -} - - - -\begin{document} -\maketitle - -\abstract{\input{sections/abstract.tex}} - -\input{sections/introduction.tex} - -\input{sections/data.tex} - -\input{sections/methods.tex} - -\input{sections/results.tex} - -\input{sections/discussion_challenges.tex} - - -\bibliography{project} - -\end{document} diff --git a/old_paper/sections/abstract.tex b/old_paper/sections/abstract.tex deleted file mode 100644 index fb8c779..0000000 --- a/old_paper/sections/abstract.tex +++ /dev/null @@ -1,13 +0,0 @@ -% Abstract -\par Our paper is the \textit{Neural Basis of Loss Aversion in Decision-Making -Under Risk} \cite{Tom2007LossAversion}. The experiment investigates the -phenomenon of loss aversion - where individuals decisions are influenced by the -amount of potential loss more than they are by the amount of potential gains. -The experiment involved giving 16 subjects 256 combinations of gain/loss of -dollars with a 50/50 chance. The subject's decisions of whether to accept or -reject of each of the proposed gambles were measured as well as their brain -activity in the fMRI machine. The behavioral data file records the subjects -responses and to which combination of values, and the BOLD data file records -the subjects neurological reponse in 4-dimensional fMRI data. - - diff --git a/old_paper/sections/data.tex b/old_paper/sections/data.tex deleted file mode 100644 index 8c3f44f..0000000 --- a/old_paper/sections/data.tex +++ /dev/null @@ -1,37 +0,0 @@ -\section{Data} -\subsection{Overview} -The study used 16 right-handed, healthy, English-speaking participants -recruited through ads posted on UCLA. Out of 16 subjects, 9 were female and -the mean age was 22 $ \pm $ 2.9 years. \cite{Tom2007LossAversion} - -\subsection{Behavorial Data} -The behavioral data consists of each subject undergoing 3 trial runs for the -``gamble'' task, in which each subject is presented with a combination of -potential monetary gains and losses given a 50/50 chance of to win/loss. Each -trial run consists of 86 different combinations of rewards/penalities spread -out accross 474 seconds. Intervals between each onset of task range from 4 to -8 seconds. Subjects were given the 4 choices in reponse to each gambling -proposal: -\begin{enumerate} - \item Strong Accept - \item Weak Accept - \item Weak Reject - \item Strong Reject -\end{enumerate} -The choices are recorded by denoting reponse numbers 1, 2, 3, and 4, -respectively. Furthermore, the response time for each gambling decision was -recorded in seconds. -\subsection{BOLD Data} -Blood-oxygen-level dependent (BOLD) imaging data were collected from each -subject as he/she performed the gamble tasks. 240 time scans were done on each -run with a time between each scan of 2 seconds. So total scanning time is 480 -seconds. Each scan consists of a snapshot consisting of a64 by 64 by 34 image -matrix. 
- -There are also 4 model conditions, with events corresponding to -\begin{enumerate} - \item Task - \item Parametric Gain - \item Parametric Loss - \item Distance from Indifference -\end{enumerate} \ No newline at end of file diff --git a/old_paper/sections/discussion_challenges.tex b/old_paper/sections/discussion_challenges.tex deleted file mode 100644 index 336ae92..0000000 --- a/old_paper/sections/discussion_challenges.tex +++ /dev/null @@ -1,21 +0,0 @@ -% Subsection of Discussion -\section{Discussion of Challenges} - -\par \indent One of the major challenges is trying to make this project as -reproducible as possible while following guidlines on documentation, testing -functions, and attempting to produce the results of the paper using our limited -understanding of fMRI data. Travis CI bugs with various versions of python, -coverage failures, and errors with directory/path locations often hinder the -process of smooth workflows. Collaboration between five group members is no -doubt difficult as we found it hard to come up with an attainable final goal -that is still rewarding. -\par Technically, most of us are new to python programming and reseach using -git workflows, thus we have only a preliminary understanding of the various -python resources available for our use. Additionally, lack of statistical -understanding of some aspects of the paper has urged us to do independent -research. Yet the disconnent between theory and implementation has been a major -obstacle as we try to put our knowledge into practice. -\par Some problems can be solved or alleviated by defined checkpoints and making -the effort to read and re-read the paper and ask questions. Further, as we -familiarize ourselves more and more with various python modules and toolkits, -results can be easier to attain and interpret. diff --git a/old_paper/sections/introduction.tex b/old_paper/sections/introduction.tex deleted file mode 100644 index 0a93d75..0000000 --- a/old_paper/sections/introduction.tex +++ /dev/null @@ -1,8 +0,0 @@ -\section{Introduction} -Most of the preliminary work we've done so far involves loading in the data -and running basic summary statistics. Our group has also written functions to -pull and graph the dvars (RMS of the signal derivatives) and the framewise -displacement. We use that in conjunction with the mean signal of the BOLD data -to identify outliers and use that in the near future to process our data -further. We have also begun rough functions to calculate the betas and begin -logistic regression. diff --git a/old_paper/sections/methods.tex b/old_paper/sections/methods.tex deleted file mode 100644 index e8b81df..0000000 --- a/old_paper/sections/methods.tex +++ /dev/null @@ -1,211 +0,0 @@ -\section{Methods} - -\subsection{Models and analysis} - -We use linear models to find the relationship between behavioral and nueral -loss aversion cross participants as well as how participants react to different -loss and gain level. Below we illustrate our model using simple multiple linear -regression form. We may implement a mixed-effects model treating partipants as -a random effect. Moreover, we may use the robust regression to reduce the -influence of outliers. - -\subsubsection{Behavioral analysis} - -We fit a Logistic regression model on the behavioral data to examine how the -response of individuals relates to the size of potential gain and loss of a -gamble. 
Following is the model: - -\begin{equation} -logit(Y_{resp}) = \beta_0 + \beta_{loss} *X_{loss} + \beta_{gain} * X_{gain} + -\epsilon -\end{equation} - -where $X_{loss}$ and $X_{gain}$ are the potential loss and gain value -seperately, $Y_{resp}$ is a categorical independent variable representing -the subjects' decision on whether to accept or reject the gambles: - -\begin{displaymath} -Y_{resp} = \left \{ \begin{array}{ll} -1 & \textrm{If the subject accepted the gamble.} \\ -0 & \textrm{If the subject rejected the gamble.} -\end{array} \right . -\end{displaymath} - -Then we calculate the the behavioral loss aversion ($ \lambda $) for each -subject as follows, note that for simplicity, we collapse 3 runs into one model -for each participant. - -\begin{equation} -\lambda = -\beta_{loss} / \beta_{gain} -\end{equation} - -We use $\lambda$ as the metric for the degree of loss aversion for each -participant. We have used R to fit the Logistic model, just as what the -authors did in the paper, and we achieved almost the same results as the paper -presented. - -\subsubsection{Linear Regression on BOLD data} - -For each voxel $i$, we fit a multiple linear model: - -\begin{equation} -Y_{i} = \beta_{i, 0} + \beta_{i, loss} *X_{loss} + \beta_{i, gain} * X_{gain} + -\epsilon_i -\end{equation} - -where $Y_{i}$ is the BOLD data of voxel $i$. For each voxel, we calculate the -neural loss aversion $\eta_i$: - -\begin{equation} -\eta_i = (-\beta_{loss}) - \beta_{gain} -\end{equation} - -Using the voxelwise neural loss aversion, we do a region-specific analysis on -BOLD data for each participant. That is, we plot a heat map of $\eta_i$ and -$\beta_{i, loss}$, $ \beta_{i, gain}$ for each participant to find out the -regions with significant activation and regions which show a significant -positive or negative correlation with increasing loss or gain levels. - -\subsubsection{Whole brain analysis of correlation between -neural activity and behavioral response across participants} - -We then apply the above model on the standard brain to analysis the neural -activity and behavioral response across participants. For each participant, -we pick up several regions with highest activation level, calculate the mean -neural loss aversion $\bar{\eta}$ within these specific region. Thus we could -examine the relationship between neural activity and behavioral using the -following regression model: - -\begin{equation} -\lambda = \alpha_0 + \alpha_1 * \eta + \epsilon -\end{equation} - -where the sample size is the number of participants(16). - -\subsubsection{Cross-validation} - -We fit linear models for each voxel for each participant. For each linear -model, we do a k-fold cross-validation. Since the sample size for each linear -regression model range from 80-90, we choose to use 10 fold cross-validation, -which means the original sample is randomly pertitioned into 10 equal sized -subsamples. \\ -In the behavioral analysis using Logistic regression, since the responce -variables are binary, we calculate the misclassification error rate to -summarize the fit. In the neural linear regression model using BOLD data, we -use the mean squared error to summarize the errors. - -\subsubsection{Inferences on Data} - -After fitting regression models on our BOLD and behavioral data, we would try -assessing and validating our models. In order to do this, we would calculate -for the residual sum of squares for our model. We have to do three tests for -the model. 
The first one is that we calculate the t-statistics and p-value for -our beta coefficients to check whether our beta parameters are statistically -significant at a significance level of 5\%. The second one is that we calculate -the residuals of this linear model and check whether it follows a normal -distribution. The third one is that we calculate the R-Squared value and the -adjusted R-squared value to see whether the values are good for the linear -regression model.\\ - - -\subsection{Explanation on model simplification} - -\subsubsection{Use of Data} -\indent \indent First of all, for simplicity reasons, we are not using all the -regressors the paper used. The model in the paper performed regression on the -BOLD data with gain, loss and euclidean distance to indifference. In our model, -we are leaving out the regressor euclidean distance to indifference. The paper -and its supplement material didn't document the exact way the authors -calculated this parameter; we are having a hard time reproducing this -parameter. Therefore, we decide to leave out this parameter when doing our own -regression. - -\subsubsection{Simplification of regression on BOLD data} - -\indent \indent We plan on simplifying the model on neural data. In the -original data analysis, the authors performed a mixed effect model when -regressing the potential gain and loss values against the BOLD data across -runs, since there are three different runs for each subject and the authors -were trying to incorporate all three runs into one model. The mixed effect -model adds a random effects term, which is associated with individual -experimental units drawn at random from a population. In this case, it -measures the difference between the average brain activation in run i and the -average brain activation in all three runs. - -We are simplifying the model because it is much easier to perform a simple -linear regression in python. In addition, we do not have a great deal of -understanding of fMRI data, so simple linear model would suffice when we are -only performing exploratory data analysis and looking for obvious pattern in -the data. - -After looking at the initial result from our linear regression model, we can -decide whether we want to further explore the relationship between the -dependent variable (BOLD data) and the independent variables (gain and loss) -and whether we want to continue to fit a mixed effect model. - -\subsection{Issues with analyses and potential solutions} - -\subsubsection{Selecting specific regions to further explore -correlation between neural and behavioral activity} - -\indent \indent Since we have no knowledge on the sections of brain that might -experience large difference in activation, it is hard for us to pick the -regions to deeper explore the correspondence between neural and behavioral loss -aversion. - -There are two potential ways to deal with this issue. The first one is to read -more paper and related articles to learn which parts of the brain are likely to -react in our given scenario -- faced with potential gain and loss combinations. -Another way to deal with the issue to to fit a regression for every part of the -brain and look for the areas with higher correspondence (higher slope). Then, -we select and graph a few areas with the most significant positive or negative -correlation between the parametric response to potential losses and behavioral -loss aversion (ln(λ)) across participants. 
- -\subsubsection{Producing heat map} - -\indent \indent Another issue that we are facing during our project is finding -the same region to plot for each participant. We see that each region of the -brain has its own standard coordinates. However, without much knowledge of -fMRI, we are not sure how to use these standard coordinates to locate the -regions of the brain. - -From our understanding, each subject's brain is mapped onto a standard brain -and we then use the coordinates for the standard brain to extract data from -the areas we are interested in. However, currently, we don't have the skill to -perform this step. - -\subsubsection{Further Research} - -We fit a linear regression model combining behavioral and BOLD data to examine -the relationship of correlation between neural activity and behavioral -response, we use another method which is different from what is mentioned in -the paper. We add the behavioral response to the regression model on BOLD data -as a predictor. We use the original 4-level response as stated below. \\ - -Moreover, if the three tests we do for the linear regression model is bad. We -can plot the independents and the dependents on plots to see whether they fit a -model that is different from linear regression models. There may be another -reason why the performance of linear regression models are bad which is that we -simplify our model that we didn’t try a mixed model as the researchers in the -paper did. - -\begin{tabular}{lllll} -\hline -behavioral response & strongly accept & weakly accept & weakly reject & -strongly reject\\ -\hline -$X_{behav}$ & 1 & 2 & 3 & 4 \\ -\hline -\end{tabular} - -And the models are following: - -\begin{equation} -Y_{i} = \beta_{i, 0} + \beta_{i, behav} * X_{behav} + \epsilon_i -\end{equation} - -However, since the response and level of loss and gain are potentially -correlated, we might need to use stepwise regression to choose the best -predictor from the regression model presented above. - diff --git a/old_paper/sections/results.tex b/old_paper/sections/results.tex deleted file mode 100644 index 4dd83a4..0000000 --- a/old_paper/sections/results.tex +++ /dev/null @@ -1,20 +0,0 @@ -\section{Results} - -\begin{figure}[H] - \centering - \includegraphics[scale=0.5]{figures/dvars_sub9run1.png} -\end{figure} - -\begin{figure}[H] - \centering - \includegraphics[scale=0.5]{figures/mean_sub9run1.png} -\end{figure} - -\begin{figure}[H] - \centering - \includegraphics[scale = 0.5]{figures/fd_sub9run1.png} -\end{figure} - -\begin{center} - Red points signify outliers in all three figures. -\end{center} diff --git a/paper/data_results/temp.txt b/paper/data_results/temp.txt new file mode 100644 index 0000000..2f798ad --- /dev/null +++ b/paper/data_results/temp.txt @@ -0,0 +1 @@ +Temp txt file, delete afterwards
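Aside, not part of the patch: the removed methods section above defines the behavioral loss-aversion metric as lambda = -beta_loss / beta_gain from a logistic fit of accept/reject on gain and loss. A minimal sketch of that computation on synthetic data with sklearn; this is not the project's actual logistic.py, and the data-generating assumptions are made up for illustration:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
gain = rng.uniform(10, 40, 256)
loss = rng.uniform(5, 20, 256)
# Synthetic loss-averse subject: losses weigh roughly twice as much as gains
accept = (gain - 2 * loss + rng.normal(0, 5, 256) > 0).astype(int)

X = np.column_stack([gain, loss])
logreg = LogisticRegression(C=1e6).fit(X, accept)  # large C ~ no penalty
b_gain, b_loss = logreg.coef_[0]
lam = -b_loss / b_gain  # behavioral loss aversion; should come out near 2
print('lambda =', lam)
```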