-
Notifications
You must be signed in to change notification settings - Fork 100
/
3dLMEr.R
executable file
·1279 lines (1141 loc) · 60.5 KB
/
3dLMEr.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env AFNI_Batch_R
# Locate 'file' in the directories listed in $PATH and return the first
# existing match (with any accidental '//' collapsed to '/'). Returns NA
# if the file is found in no PATH directory.
first.in.path <- function(file) {
  # candidate full paths: one per PATH entry
  ff <- paste(strsplit(Sys.getenv('PATH'), ':')[[1]], '/', file, sep='')
  # file.exists() is vectorized; keep only candidates that actually exist
  ff <- ff[file.exists(ff)]
  #cat('Using ', ff[1],'\n');
  return(gsub('//', '/', ff[1], fixed=TRUE))
}
# Pull in the AFNI R support library (dataset I/O, argument parsing, apl(),
# errex.AFNI(), etc.); AFNIio.R must be reachable through $PATH.
source(first.in.path('AFNIio.R'))
ExecName <- '3dLMEr' # program name, used in command history and error messages
# Global variables
tolL <- 1e-16 # lower tolerance: avoids division by 0 and skips voxels whose data are mostly 0
#################################################################################
##################### Begin 3dLMEr Input functions ################################
#################################################################################
#The help function for 3dLMEr batch (AFNI-style script mode)
# Print the 3dLMEr help message: an introduction, four worked examples, and
# the option table assembled from 'params' (built in read.LME.opts.batch).
#   params  - named list of option descriptors, each with a $help string
#   alpha   - if TRUE, list the options alphabetically
#   itspace - indentation prefix prepended to every option entry
#   adieu   - if TRUE, exit the program (exit.AFNI) after printing
help.LME.opts <- function (params, alpha = TRUE, itspace=' ', adieu=FALSE) {
  intro <-
'
================== Welcome to 3dLMEr ==================
Program for Voxelwise Linear Mixed-Effects (LME) Analysis
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Version 0.1.5, Dec 19, 2021
Author: Gang Chen (gangchen@mail.nih.gov)
Website - https://afni.nimh.nih.gov/gangchen_homepage
SSCC/NIMH, National Institutes of Health, Bethesda MD 20892, USA
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Introduction
------
Linear Mixed-Effects (LME) analysis adopts the traditional approach that
differentiates two types of effects: fixed effects capture the population-
level components while random effects characterize the lower-level components
such as subjects, families, scanning sites, etc.
3dLMEr is a revised and advanced version of its older brother 3dLME in the sense
that the former is much more flexible in specifying the random-effects components
than the latter. Also, 3dLMEr uses the R package \'lme4\' while 3dLME was written
with the R package \'nlme\', and the statistic values for main effects and
interactions are approximated with the Satterthwaite\'s approach. The greater
flexibility of 3dLMEr lies in its adoption of random-effects notations by the R
package \'lme4\', as nicely summarized in the following table:
http://afni.nimh.nih.gov/sscc/staff/gangc/pub/lmerNotations.pdf
(adopted from https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html)
Similar to 3dLME, all the main effects and interactions are automatically available
in the output while simple effects that tease apart those main effects and
interactions would have to be requested through options -gltCode or -glfCode. Also,
the 3dLMEr interface is largely similar to 3dLME except
1) the random-effects components are incorporated as part of the model
specification, and thus the user is fully responsible in properly formulating the
model structure through \'-model ...\' (option -ranEeff in 3dLME is no longer
necessary for 3dLMEr);
2) the specifications for simple and composite effects through -gltCode and
-glfCode are slightly simplified (the label for each effect is part of -gltCode
and -glfCode, and no more -gltLabel is needed); and
3) all the statistic values for simple effects (specified by the user through -gltCode
and -glfCode) are stored in the output as Z-statistic while main effects, interactions
and the composite effects (automatically generated by 3dLMEr) are represented in the
output as chi-square with 2 degrees of freedom. The fixed number of DFs (i.e., 2) for
the chi-square statistic, regardless of the specific situation, is adopted for
convenience because of the varying DFs due to the Satterthwaite approximation.
If you want to cite the analysis approach, use the following reference:
Chen, G., Saad, Z.S., Britton, J.C., Pine, D.S., Cox, R.W. (2013). Linear
Mixed-Effects Modeling Approach to FMRI Group Analysis. NeuroImage 73:176-190.
http://dx.doi.org/10.1016/j.neuroimage.2013.01.047
Cite the following if test-retest analysis is performed using the trial-level
effect estimates as input with 3dLEMr through the option -TRR:
Chen G, et al., Beyond the intraclass correlation: A hierarchical modeling
approach to test-retest assessment.
https://www.biorxiv.org/content/10.1101/2021.01.04.425305v1
To be added soon---
Input files can be in AFNI, NIfTI, surface (niml.dset) or 1D format. To obtain
the output int the same format of the input, append a proper suffix to the
output specification option -prefix (e.g., .nii, .niml.dset or .1D for NIfTI,
surface or 1D).
3dLMEr allows for the incorporation of various types of explanatory variables
including categorical (between- and within-subject factors) and
quantitative variables (e.g., age, behavioral data). The burden of properly
specifying the structure of lower-level effects is placed on the user\'s
shoulder, so familiarize yourself with the following FAQ in case you want some
clarifications: https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html
Whenever a quantitative variable is involved, it is required to explicitly
declare the variable through option -qVars. In addition, be mindful about the
centering issue of each quantitive quantitative variable: you have to decide
which makes more sense in the research context - global centering or within-
condition (or within-group) centering? Here is some background and discussion
about the issue:
https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/statistics/center.html
The following exemplifying scripts are good demonstrations. More examples will
be added in the future if I could crowdsource more scenarios from the users
(including you the reader). In case you find one example similar to your data
structure, use the example(s) as a template and then build up your own script.
In addition to R installation, the following R packages need to be installed
first before running 3dLMEr: "lmerTest", "phia" and "snow". To install these R
packages, run the following command at the terminal:
rPkgsInstall -pkgs "lmerTest,phia,snow"
Alternatively you may install them in R:
install.packages("lmerTest")
install.packages("phia")
install.packages("snow")
Once the 3dLMEr command script is constructed, it can be run by copying and
pasting to the terminal. Alternatively (and probably better) you save the
script as a text file, for example, called LME.txt, and execute it with the
following (assuming on tc shell),
nohup tcsh -x LME.txt &
or,
nohup tcsh -x LME.txt > diary.txt &
or,
nohup tcsh -x LME.txt |& tee diary.txt &
The advantage of the latter commands is that the progression is saved into
the text file diary.txt and, if anything goes awry, can be examined later.\n'
  ex1 <-
"Example 1 --- Simplest case: LME analysis for one group of subjects each of
which has three effects associated with three emotions (pos, neg and neu),
and the effects of interest are the comparisons among the three emotions
at the populaton level (missing data allowed). This data structure is usually
considered as one-way repeated-measures (or within-subject) ANOVA if no
missing data occurred. The LME model is typically formulated with a random
intercept in this case. With the option -bounds, values beyond [-2, 2] will
be treated as outliers and considered as missing. If you want to set a range,
choose the bounds that make sense with your input data.
-------------------------------------------------------------------------
3dLMEr -prefix LME -jobs 12 \\
-mask myMask+tlrc \\
-model 'emotion+(1|Subj)' \\
-bounds -2 2 \\
-gltCode pos 'emotion : 1*pos' \\
-gltCode neg 'emotion : 1*neg' \\
-gltCode neu 'emotion : 1*neu' \\
-gltCode pos-neg 'emotion : 1*pos -1*neg' \\
-gltCode pos-neu 'emotion : 1*pos -1*neu' \\
-gltCode neg-neu 'emotion : 1*neg -1*neu' \\
-gltCode em-eff1 'emotion : 0.5*pos +0.5*neg -1*neu' \\
-glfCode em-eff2 'emotion : 1*pos -1*neg & 1*pos -1*neu' \\
-dataTable \\
Subj emotion InputFile \\
s1 pos s1_pos+tlrc \\
s1 neg s1_neg+tlrc \\
s1 neu s1_neu+tlrc \\
s2 pos s2_pos+tlrc \\
s2 neg s2_neg+tlrc \\
s2 pos s2_neu+tlrc \\
...
s20 pos s20_pos+tlrc \\
s20 neg s20_neg+tlrc \\
s20 neu s20_neu+tlrc \\
...
\n"
  ex2 <-
"Example 2 --- LME analysis for one group of subjects each of which has
three effects associated with three emotions (pos, neg and neu), and the
effects of interest are the comparisons among the three emotions at the
population level. In addition, reaction time (RT) is available per emotion
from each subject. An LME model can be formulated to include both random
intercept and random slope. Be careful about the centering issue about any
quantitative variable: you have to decide which makes more sense - global
centering or within-condition (or within-group) centering?
-------------------------------------------------------------------------
3dLMEr -prefix LME -jobs 12 \\
-mask myMask+tlrc \\
-model 'emotion*RT+(RT|Subj)' \\
-bounds -2 2 \\
-qVars 'RT' \\
-qVarCenters 0 \\
-gltCode pos 'emotion : 1*pos' \\
-gltCode neg 'emotion : 1*neg' \\
-gltCode neu 'emotion : 1*neu' \\
-gltCode pos-neg 'emotion : 1*pos -1*neg' \\
-gltCode pos-neu 'emotion : 1*pos -1*neu' \\
-gltCode neg-neu 'emotion : 1*neg -1*neu' \\
-gltCode em-eff1 'emotion : 0.5*pos +0.5*neg -1*neu' \\
-glfCode em-eff2 'emotion : 1*pos -1*neg & 1*pos -1*neu' \\
-dataTable \\
Subj emotion RT InputFile \\
s1 pos 23 s1_pos+tlrc \\
s1 neg 34 s1_neg+tlrc \\
s1 neu 28 s1_neu+tlrc \\
s2 pos 31 s2_pos+tlrc \\
s2 neg 22 s2_neg+tlrc \\
s2 pos 29 s2_neu+tlrc \\
...
s20 pos 12 s20_pos+tlrc \\
s20 neg 20 s20_neg+tlrc \\
s20 neu 30 s20_neu+tlrc \\
...
\n"
  ex3 <-
"Example 3 --- LME analysis for one group of subjects each of which has three
effects associated with three emotions (pos, neg and neu), and the effects
of interest are the comparisons among the three emotions at the populaton
level. As the data were acquired across 12 scanning sites, we set up an LME
model with a crossed random-effects structure, one for cross-subjects and one
for cross-sites variability.
-------------------------------------------------------------------------
3dLMEr -prefix LME -jobs 12 \\
-mask myMask+tlrc \\
-model 'emotion+(1|Subj)+(1|site)' \\
-bounds -2 2 \\
-gltCode pos 'emotion : 1*pos' \\
-gltCode neg 'emotion : 1*neg' \\
-gltCode neu 'emotion : 1*neu' \\
-gltCode pos-neg 'emotion : 1*pos -1*neg' \\
-gltCode pos-neu 'emotion : 1*pos -1*neu' \\
-gltCode neg-neu 'emotion : 1*neg -1*neu' \\
-gltCode em-eff1 'emotion : 0.5*pos +0.5*neg -1*neu' \\
-glfCode em-eff2 'emotion : 1*pos -1*neg & 1*pos -1*neu' \\
-dataTable \\
Subj emotion site InputFile \\
s1 pos site1 s1_pos+tlrc \\
s1 neg site1 s1_neg+tlrc \\
s1 neu site2 s1_neu+tlrc \\
s2 pos site1 s2_pos+tlrc \\
s2 neg site2 s2_neg+tlrc \\
s2 pos site3 s2_neu+tlrc \\
...
s80 pos site12 s80_pos+tlrc \\
s80 neg site12 s80_neg+tlrc \\
s80 neu site10 s80_neu+tlrc \\
...
\n"
  ex4 <-
"Example 4 --- Test-retest reliability. LME model can be adopted for test-
retest reliability analysis if trial-level effect estimates (e.g., using
option -stim_times_IM in 3dDeconvolve/3dREMLfit) are available from each
subjects. The following script demonstrates a situation where each subject
performed same two tasks across two sessions. The goal is to obtain the
test-retest reliability at the whole-brain voxel level for the contrast
between the two tasks with the test-retest reliability for the average
effect between the two tasks as a byproduct.
WARNING: numerical failures may occur, especially for a contrast between
two conditions. The failures manifest with a large portion of 0, 1 and -1
values in the output. In that case, use the program TRR to conduct
region-level test-retest reliability analysis.
-------------------------------------------------------------------------
3dLMEr -prefix output -TRR -jobs 16 \
-qVars 'cond' \
-bounds -2 2 \
-model '0+sess+cond:sess+(0+sess|Subj)+(0+cond:sess|Subj)' \
-dataTable @data.tbl
With many trials per condition, it is recommended that the data table
is saved as a separate file in pure text of long format with condition
(variable 'cond' in the script above) through dummy coding of -0.5 and
0.5 with the option -qVars 'cond'. Code subject and session as factor
labels with labels. Below is an example of the data table. There is no
need to add backslash at the end of each line. If sub-brick selector
is used, do NOT use gziped files (otherwise the file reading time would
be too long) and do NOT add quotes around the square brackets [] for the
sub-brick selector.
Subj sess cond InputFile
Subj1 s1 -0.5 Subj1s1c1_trial1.nii
Subj1 s1 -0.5 Subj1s1c1_trial2.nii
...
Subj1 s1 -0.5 Subj1s1c1_trial40.nii
Subj1 s1 0.5 Subj1s1c2_trial1.nii
Subj1 s1 0.5 Subj1s1c2_trial2.nii
...
Subj1 s1 0.5 Subj1s1c2_trial40.nii
Subj1 s2 -0.5 Subj1s2c1_trial1.nii
Subj1 s2 -0.5 Subj1s2c1_trial2.nii
...
Subj1 s2 -0.5 Subj1s2c1_trial40.nii
Subj1 s2 0.5 Subj1s2c2_trial1.nii
Subj1 s2 0.5 Subj1s2c2_trial2.nii
...
Subj1 s2 0.5 Subj1s2c2_trial40.nii
...
\n"
  # assemble the per-option help entries, sorted alphabetically if requested
  parnames <- names(params)
  ss <- vector('character')
  if(alpha) {
    parnames <- sort(parnames)
    ss <- paste('Options in alphabetical order:\n',
                '------------------------------\n', sep='')
  } else ss <- paste('Options:\n', '--------\n', sep='')
  # seq_along() is safe for an empty option list (1:length would yield c(1,0))
  for(ii in seq_along(parnames)) {
    op <- params[parnames[ii]][[1]]
    if(!is.null(op$help)) ss <- c(ss , paste(itspace, op$help, sep='')) else
      ss <- c(ss, paste(itspace, parnames[ii], '(no help available)\n', sep=''))
  }
  ss <- paste(ss, sep='\n')
  cat(intro, ex1, ex2, ex3, ex4, ss, sep='\n')
  if (adieu) exit.AFNI();
}
#Change command line arguments into an options list
# Turn the raw command-line argument vector into the 3dLMEr options list.
#   args - character vector of command-line arguments
#   verb - verbosity level; nonzero prints the parsed argument structure
# Builds the table of recognized options (each with its help text), parses
# 'args' against it via parse.AFNI.args(), initializes every option to its
# default, then overwrites defaults with whatever the user supplied.
# Returns the populated list 'lop'; exits via errex.AFNI on a parse failure.
read.LME.opts.batch <- function (args=NULL, verb = 0) {
  params <- list (
    '-prefix' = apl(n = 1, d = NA, h = paste(
"-prefix PREFIX: Output file name. For AFNI format, provide prefix only,",
" with no view+suffix needed. Filename for NIfTI format should have",
" .nii attached (otherwise the output would be saved in AFNI format).\n", sep = '\n'
    ) ),
    '-resid' = apl(n = 1, d = NA, h = paste(
"-resid PREFIX: Output file name for the residuals. For AFNI format, provide",
" prefix only without view+suffix. Filename for NIfTI format should",
" have .nii attached, while file name for surface data is expected",
" to end with .niml.dset. The sub-brick labeled with the '(Intercept)',",
" if present, should be interpreted as the effect with each factor",
" at the reference level (alphabetically the lowest level) for each",
" factor and with each quantitative covariate at the center value.\n", sep = '\n'
    ) ),
    '-mask' = apl(n=1, d = NA, h = paste(
"-mask MASK: Process voxels inside this mask only.\n",
" Default is no masking.\n"
    ) ),
    '-jobs' = apl(n = 1, d = 1, h = paste(
"-jobs NJOBS: On a multi-processor machine, parallel computing will speed ",
" up the program significantly.",
" Choose 1 for a single-processor computer.\n", sep = '\n'
    ) ),
    '-IF' = apl(n = 1, d = NA, h = paste(
"-IF var_name: var_name is used to specify the column name that is designated for",
" input files of effect estimate. The default (when this option is not invoked",
" is 'InputFile', in which case the column header has to be exactly as 'InputFile'",
" This input file for effect estimates has to be the last column.\n", sep = '\n'
    ) ),
    '-model' = apl(n = 1, d = 1, h = paste(
"-model FORMULA: Specify the model structure for all the variables. The",
" expression FORMULA with more than one variable has to be surrounded",
" within (single or double) quotes. Variable names in the formula",
" should be consistent with the ones used in the header of -dataTable.",
" In the LME context the simplest model is \"1+(1|Subj)\" in",
" which the random effect from each of the two subjects in a pair is",
" symmetrically incorporated in the model. Each random-effects factor is",
" specified within parentheses per formula convention in R. Any",
" effects of interest and confounding variables (quantitative or",
" categorical variables) can be added as fixed effects without parentheses.\n", sep = '\n'
    ) ),
    '-dbgArgs' = apl(n=0, h = paste(
"-dbgArgs: This option will enable R to save the parameters in a",
" file called .3dLMEr.dbg.AFNI.args in the current directory",
" so that debugging can be performed.\n", sep='\n')),
    '-TRR' = apl(n=0, h = paste(
"-TRR: This option will allow the analyst to perform test-retest reliability analysis",
" at the whole-brain voxel level. To be able to adopt this modeling approach,",
" trial-level effect estimates have to be provided from each subject (e.g.,",
" using option -stim_times_IM in 3dDeconvolve/3dREMLfit). Currently it works",
" with the situation with two conditions for a group of subjects that went",
" two sessions. The analytical goal to assess test-retest reliability across",
" the two sessions for the contrast between the two conditions. Check out",
" Example 4 for model specification. It is possible that numerical failures",
" may occur for a contrast between two conditions with values of 0, 1 or -1 in",
" the output. Use program TRR for ROI-level test-retest reliability analysis.\n", sep='\n')),
    '-bounds' = apl(n=2, h = paste(
"-bounds lb ub: This option is for outlier removal. Two numbers are expected from",
" the user: the lower bound (lb) and the upper bound (ub). The input data will",
" be confined within [lb, ub]: any values in the input data that are beyond",
" the bounds will be removed and treated as missing. Make sure the first number",
" less than the second. You do not have to use this option to censor your data!\n", sep='\n')),
    '-qVars' = apl(n=c(1,100), d=NA, h = paste(
"-qVars variable_list: Identify quantitative variables (or covariates) with",
" this option. The list with more than one variable has to be",
" separated with comma (,) without any other characters such as",
" spaces and should be surrounded within (single or double) quotes.",
" For example, -qVars \"Age,IQ\"",
" WARNINGS:",
" 1) Centering a quantitative variable through -qVarsCenters is",
" very critical when other fixed effects are of interest.",
" 2) Between-subjects covariates are generally acceptable.",
" However EXTREME caution should be taken when the groups",
" differ substantially in the average value of the covariate.",
" \n",
             sep = '\n'
    ) ),
    '-vVars' = apl(n=c(1,100), d=NA, h = paste(
"-vVars variable_list: Identify voxel-wise covariates with this option.",
" Currently one voxel-wise covariate is allowed only. By default",
" mean centering is performed voxel-wise across all subjects.",
" Alternatively centering can be specified through a global value",
" under -vVarsCenters. If the voxel-wise covariates have already",
" been centered, set the centers at 0 with -vVarsCenters.\n",
             sep = '\n'
    ) ),
    '-vVarCenters' = apl(n=1, d=NA, h = paste(
"-vVarCenters VALUES: Specify centering values for voxel-wise covariates",
" identified under -vVars. Multiple centers are separated by ",
" commas (,) within (single or double) quotes. The order of the",
" values should match that of the quantitative variables in -qVars.",
" Default (absence of option -vVarsCenters) means centering on the",
" average of the variable across ALL subjects regardless their",
" grouping. If within-group centering is desirable, center the",
" variable yourself first before the files are fed under -dataTable.\n",
             sep = '\n'
    ) ),
    '-gltCode' = apl(n=c(1,1000), d=NA, h = paste(
"-gltCode label weights: Specify the label and weights of interest in a general",
" linear t-style (GLT) formulation in which only one null relationship is",
" involved (cf. -glfCode). The weights should be surrounded with quotes. For",
" example, the specification '-gltCode AvB 'Condition : 1*A -1*B' compares A",
" and B with a label 'AvB' for the output sub-bricks. \n", sep = '\n'
    ) ),
    '-glfCode' = apl(n=c(1,1000), d=NA, h = paste(
"-glfCode label CODING: Specify a general linear F-style (GLF) formulation",
" with the weights among factor levels in which two or more null",
" relationships (e.g., A-B=0 and B-C=0) are innvolved. The symbolic",
" coding has to be within (single or double) quotes. For example, the",
" coding '-glfCode AvBvc 'Condition : 1*A -1*B & 1*A -1*C Emotion : 1*pos''",
" examines the main effect of Condition at the positive Emotion with",
" the output labeled as AvBvC. Similarly the coding '-glfCode CondByEmo'",
" 'Condition : 1*A -1*B & 1*A -1*C Emotion : 1*pos -1*neg' looks",
" for the interaction between the three levels of Condition and the",
" two levels of Emotion and the resulting sub-brick is labeled as",
" 'CondByEmo'.\n",
" NOTE:\n",
" 1) The weights for a variable do not have to add up to 0.\n",
" 2) When a quantitative variable is present, other effects are",
" tested at the center value of the covariate unless the covariate",
" value is specified as, for example, 'Group : 1*Old Age : 2', where",
" the Old Group is tested at the Age of 2 above the center.\n",
" 3) The absence of a categorical variable in a coding means the",
" levels of that factor are averaged (or collapsed) for the GLF.\n",
" 4) The appearance of a categorical variable has to be followed",
" by the linear combination of its levels.\n",
             sep = '\n'
    ) ),
    # '-cVars' = apl(n=c(1,100), d=NA, h = paste(
    # "-cVars variable_list: Identify categorical (qualitive) variables (or",
    # " factors) with this option. The list with more than one variable",
    # " has to be separated with comma (,) without any other characters such",
    # " as spaces and should be surrounded within (single or double) quotes.",
    # " For example, -cVars \"sex,site\"\n",
    # sep = '\n'
    # ) ),
    #
    '-qVarCenters' = apl(n=c(1,100), d=NA, h = paste(
"-qVarCenters VALUES: Specify centering values for quantitative variables",
" identified under -qVars. Multiple centers are separated by ",
" commas (,) without any other characters such as spaces and should",
" be surrounded within (single or double) quotes. The order of the",
" values should match that of the quantitative variables in -qVars.",
" Default (absence of option -qVarCenters) means centering on the",
" average of the variable across ALL subjects regardless their",
" grouping. If within-group centering is desirable, center the",
" variable YOURSELF first before the values are fed into -dataTable.\n",
             sep = '\n'
    ) ),
    '-dataTable' = apl(n=c(1, 1000000), d=NA, h = paste(
"-dataTable TABLE: List the data structure with a header as the first line.\n",
" NOTE:\n",
" 1) This option has to occur last in the script; that is, no other",
" options are allowed thereafter. Each line should end with a backslash",
" except for the last line.\n",
" 2) The order of the columns should not matter except that the last",
" column has to be the one for input files, 'InputFile'. Unlike 3dLME, the",
" subject column (Subj in 3dLME) does not have to be the first column;",
" and it does not have to include a subject ID column under some situations",
" Each row should contain only one input file in the table of long format",
" (cf. wide format) as defined in R. Input files can be in AFNI, NIfTI or",
" surface format. AFNI files can be specified with sub-brick selector (square",
" brackets [] within quotes) specified with a number or label.\n",
" 3) It is fine to have variables (or columns) in the table that are",
" not modeled in the analysis.\n",
" 4) When the table is part of the script, a backslash is needed at the end",
" of each line (except for the last line) to indicate the continuation to the",
" next line. Alternatively, one can save the context of the table as a separate",
" file, e.g., calling it table.txt, and then in the script specify the data",
" with '-dataTable @table.txt'. However, when the table is provided as a",
" separate file, do NOT put any quotes around the square brackets for each",
" sub-brick,otherwise the program would not properly read the files, unlike the",
" situation when quotes are required if the table is included as part of the",
" script. Backslash is also not needed at the end of each line, but it would",
" not cause any problem if present. This option of separating the table from",
" the script is useful: (a) when there are many input files so that the program",
" complains with an 'Arg list too long' error; (b) when you want to try",
" different models with the same dataset.\n",
             sep = '\n'
    ) ),
    '-help' = apl(n=0, h = '-help: this help message\n'),
    '-show_allowed_options' = apl(n=0, h=
"-show_allowed_options: list of allowed options\n" ),
    '-cio' = apl(n=0, h = paste(
"-cio: Use AFNI's C io functions, which is the default. Alternatively -Rio",
" can be used.\n", sep='\n')),
    '-Rio' = apl(n=0, h = "-Rio: Use R's io functions. The alternative is -cio.\n")
  )
  #browser()
  ops <- parse.AFNI.args(args, params, other_ok=FALSE)
  if (verb) show.AFNI.args(ops, verb=0, hstr='')
  if (is.null(ops))
    errex.AFNI('Error parsing arguments. See 3dLMEr -help for details.')
  # Parse the options
  # initialize with defaults
  com_history<-AFNI.command.history(ExecName, args,NULL)
  lop <- list (com_history = com_history)
  lop$nNodes <- 1
  lop$cutoff <- 0
  #lop$fixEff <- 1
  lop$maskFN <- NA
  lop$IF <- 'InputFile' #default
  #lop$ranEff <- NA
  lop$model <- NA
  lop$qVars <- NA
  lop$bounds <- NULL
  lop$vVars <- NA
  lop$vQV <- NA
  lop$qVarCenters <- NA
  lop$vVarCenters <- NA
  lop$gltCode <- NULL
  lop$glfCode <- NULL
  lop$dataTable <- NULL
  lop$iometh <- 'clib'
  lop$dbgArgs <- FALSE # for debugging purpose
  lop$TRR <- FALSE # for test-retest reliability
  lop$verb <- 0
  # Get user's input; seq_along() is safe should 'ops' ever be empty
  for (i in seq_along(ops)) {
    # strip the leading '-' to get the bare option name
    opname <- strsplit(names(ops)[i],'^-')[[1]];
    opname <- opname[length(opname)];
    switch(opname,
      prefix = lop$outFN <- pprefix.AFNI.name(ops[[i]]),
      resid = lop$resid <- pprefix.AFNI.name(ops[[i]]),
      mask = lop$maskFN <- ops[[i]],
      jobs = lop$nNodes <- ops[[i]],
      model = lop$model <- ops[[i]],
      IF = lop$IF <- ops[[i]],
      #num_glt = lop$num_glt <- ops[[i]],
      gltCode = lop$gltCode <- ops[[i]],
      glfCode = lop$glfCode <- ops[[i]],
      qVars = lop$qVars <- ops[[i]],
      bounds = lop$bounds <- ops[[i]],
      vVars = lop$vVars <- ops[[i]],
      qVarCenters = lop$qVarCenters <- ops[[i]],
      vVarCenters = lop$vVarCenters <- ops[[i]],
      dataTable = lop$dataTable <- dataTable.AFNI.parse(ops[[i]]),
      help = help.LME.opts(params, adieu=TRUE),
      dbgArgs = lop$dbgArgs <- TRUE,
      TRR = lop$TRR <- TRUE,
      cio = lop$iometh<-'clib',
      Rio = lop$iometh<-'Rlib'
    )
  }
  return(lop)
}# end of read.LME.opts.batch
# Build the weight matrices for a general-linear-F (GLF) contrast from its
# tokenized symbolic coding.
#   cStr    - tokenized -glfCode string, e.g.
#             c("Condition", ":", "1*A", "-1*B", "&", "1*A", "-1*C")
#   dataStr - the data table whose factor columns supply the level names
# Returns a named list with one matrix per involved factor: one row per
# factor level, one column per '&'-separated null relationship, entries
# being the specified weights (0 for levels not mentioned). Exits via
# errex.AFNI when a variable or level name is not found in dataStr.
glfConstr <- function(cStr, dataStr) {
  pos <- which(cStr == ":")         # positions of the ':' separators
  vars <- cStr[pos - 1]             # factor names: the token before each ':'
  nvar <- length(vars)
  pos <- c(pos, length(cStr) + 2)   # sentinel so the chunk of the last factor can be extracted
  varsOK <- vars %in% colnames(dataStr)
  if(all(varsOK)) {
    glfList <- vector('list', nvar)
    names(glfList) <- vars
    for(ii in seq_len(nvar)) {
      lvl <- levels(dataStr[, vars[ii]])  # all the levels for each involved factor
      # tokens belonging to this factor: between its ':' and the next variable name
      chunk <- cStr[(pos[ii] + 1):(pos[ii + 1] - 2)]
      fpos <- which(chunk == "&")         # positions of &'s (relation separators)
      nc <- length(fpos)                  # number of columns for this factor minus 1
      glfList[[ii]] <- matrix(0, nrow = length(lvl), ncol = nc + 1)
      fpos <- c(0, fpos, length(chunk) + 1)
      for(jj in seq_len(length(fpos) - 1)) {
        # split the 'weight*level' terms of the jj-th relation
        sepTerms <- unlist(lapply(chunk[(fpos[jj] + 1):(fpos[jj + 1] - 1)], strsplit, '\\*'))
        lvlInv <- sepTerms[seq(2, length(sepTerms), 2)]  # levels involved
        lvlOK <- lvlInv %in% lvl
        if(all(lvlOK)) {
          sq <- match(lvlInv, lvl)
          glfList[[ii]][sq, jj] <- as.numeric(sepTerms[seq(1, length(sepTerms), 2)])
        } else errex.AFNI(paste("Incorrect level coding in variable", vars[ii],
                                ": ", lvlInv[which(!lvlOK)], " \n "))
      }
    }
    return(glfList)
  } else errex.AFNI(paste("Incorrect variable name in GLF coding: ", vars[which(!varsOK)], " \n "))
}
# glfConstr(lop$glfCode[[1]], lop$dataStr)
#Change options list to 3dLMEr variable list
# Validate and post-process the parsed option list (lop) for 3dLMEr.
#
# Steps, in order:
#   1. verify the output (-prefix) and residual (-resid) filenames do not
#      collide with existing datasets (both NIML and BRIK/HEAD forms)
#   2. split comma-separated -qVars / -vVars lists into lop$QV / lop$vQV
#   3. sanity-check the -bounds pair
#   4. reshape the flat -dataTable token vector into the data frame
#      lop$dataStr (quantitative columns to numeric, others to factor)
#   5. build GLT/GLF contrast structures via gl_Constr()
#   6. adjust output names per I/O method and read the mask volume, if any
# Returns the augmented lop; aborts via errex.AFNI() on any problem.
process.LME.opts <- function (lop, verb = 0) {
if(is.null(lop$outFN)) errex.AFNI(c("Output filename not specified! Add filename with -prefix.\n"))
an <- parse.AFNI.name(lop$outFN)
if(an$type == "NIML") {
if(file.exists(lop$outFN)) errex.AFNI(c("File ", lop$outFN, " exists! Try a different name.\n"))
} else if(file.exists(paste(lop$outFN,"+tlrc.HEAD", sep="")) ||
file.exists(paste(lop$outFN,"+tlrc.BRIK", sep="")) ||
file.exists(paste(lop$outFN,"+orig.HEAD", sep="")) ||
file.exists(paste(lop$outFN,"+orig.BRIK", sep=""))) {
errex.AFNI(c("File ", lop$outFN, " exists! Try a different name.\n"))
return(NULL) # NOTE(review): presumably unreachable -- errex.AFNI() exits
}
#browser()
if(an$type != 'BRIK' && lop$iometh != 'clib')
errex.AFNI(c('Must of use -cio option with any input/output ',
'format other than BRIK'))
# same existence checks for the residual output, when requested
if(!is.null(lop$resid)) {
an2 <- parse.AFNI.name(lop$resid)
if(an2$type == "NIML") {
if(file.exists(lop$resid)) errex.AFNI(c("File ", lop$resid, " exists! Try a different name.\n"))
} else if(file.exists(paste(lop$resid,"+tlrc.HEAD", sep="")) ||
file.exists(paste(lop$resid,"+tlrc.BRIK", sep="")) ||
file.exists(paste(lop$resid,"+orig.HEAD", sep="")) ||
file.exists(paste(lop$resid,"+orig.BRIK", sep=""))) {
errex.AFNI(c("File ", lop$resid, " exists! Try a different name.\n"))
return(NULL)
}
if(an2$type != 'BRIK' && lop$iometh != 'clib')
errex.AFNI(c('Must of use -cio option with any input/output ',
'format other than BRIK'))
}
# assume the quantitative variables are separated by + here
if(!is.na(lop$qVars)) lop$QV <- strsplit(lop$qVars, '\\,')[[1]]
if(!is.na(lop$vVars[1])) lop$vQV <- strsplit(lop$vVars, '\\,')[[1]]
if(!(is.null(lop$bounds))) {
if(lop$bounds[1] > lop$bounds[2])
errex.AFNI(paste0('Incorrect setting with option -bounds! The lower bound ', lop$bounds[1],
' should be smaller than the upper bound ', lop$bounds[2], '!'))
}
# reshape the flat -dataTable token stream (header row + data rows) into a
# data frame: wd columns per row, input filename assumed in the last column
len <- length(lop$dataTable)
wd <- which(lop$dataTable == lop$IF) # assuming the input file is the last column here!
hi <- len / wd - 1 # NOTE(review): number of data rows; computed but not used below
if(len %% wd != 0)
errex.AFNI(paste('The content under -dataTable is not rectangular!', len, wd)) else {
lop$dataStr <- NULL
for(ii in 1:wd) lop$dataStr <- data.frame(cbind(lop$dataStr, lop$dataTable[seq(wd+ii, len, wd)]))
names(lop$dataStr) <- lop$dataTable[1:wd]
# wow, terrible mistake here with as.numeric(lop$dataStr[,jj])
#if(!is.na(lop$qVars)) for(jj in lop$QV) lop$dataStr[,jj] <- as.numeric(lop$dataStr[,jj])
if(!is.na(lop$qVars)) for(jj in lop$QV) lop$dataStr[,jj] <- as.numeric(as.character(lop$dataStr[,jj]))
if(!is.na(lop$vVars[1])) for(jj in lop$vQV) lop$dataStr[,jj] <- as.character(lop$dataStr[,jj])
for(ii in 1:(wd-1)) if(sapply(lop$dataStr, class)[ii] == "character")
lop$dataStr[,ii] <- as.factor(lop$dataStr[,ii])
}
# general linear tests: label is the first token, the rest goes to gl_Constr()
if(!is.null(lop$gltCode)) {
lop$gltLabel <- unlist(lapply(lop$gltCode, `[`, 1))
lop$num_glt <- length(lop$gltLabel)
glt <- gl_Constr(lop$num_glt, lapply(lop$gltCode, '[',-1), lop)
lop$gltList <- glt[[1]]
lop$slpList <- glt[[2]]
lop$covValList <- glt[[3]]
} else lop$num_glt <- 0
# general linear F-tests: same structure as the GLTs above
if(!is.null(lop$glfCode)) {
lop$glfLabel <- unlist(lapply(lop$glfCode, `[`, 1))
lop$num_glf <- length(lop$glfLabel)
glf <- gl_Constr(lop$num_glf, lapply(lop$glfCode, '[',-1), lop)
lop$glfList <- glf[[1]]
lop$slpListF <- glf[[2]]
lop$covValListF <- glf[[3]]
} else lop$num_glf <- 0
# normalize output names: Rlib I/O always appends +tlrc; otherwise only when
# the name has neither an extension nor a view, and re-check for collisions
if(lop$iometh == 'Rlib') {
lop$outFN <- paste(lop$outFN, "+tlrc", sep="")
if(!is.null(lop$resid)) lop$resid <- paste(lop$resid, "+tlrc", sep="")
} else {
#an <- parse.AFNI.name(lop$outFN)
if(an$type == "BRIK" && an$ext == "" && is.na(an$view))
lop$outFN <- paste(lop$outFN, "+tlrc", sep="")
if (exists.AFNI.name(lop$outFN) ||
exists.AFNI.name(modify.AFNI.name(lop$outFN,"view","+tlrc")))
errex.AFNI(c("File ", lop$outFN, " exists! Try a different name.\n"))
if(!is.null(lop$resid)) {
#an2 <- paste(lop$resid, "+tlrc", sep="")
if(an2$type == "BRIK" && an2$ext == "" && is.na(an2$view))
lop$resid <- paste(lop$resid, "+tlrc", sep="")
if (exists.AFNI.name(lop$resid) ||
exists.AFNI.name(modify.AFNI.name(lop$resid,"view","+tlrc")))
errex.AFNI(c("File ", lop$resid, " exists! Try a different name.\n"))
}
}
if(lop$nNodes < 1) lop$nNodes <- 1 # at least one compute node/process
# read the mask dataset now; must contain a single sub-brick
if(!is.na(lop$maskFN)) {
if(verb) cat("Will read ", lop$maskFN,'\n')
if(is.null(mm <- read.AFNI(lop$maskFN, verb=lop$verb, meth=lop$iometh, forcedset = TRUE))) {
warning("Failed to read mask", immediate.=TRUE)
return(NULL)
}
#lop$maskData <- mm$brk[,,,1]
lop$maskData <- mm$brk
if(verb) cat("Done read ", lop$maskFN,'\n')
if(dim(mm$brk)[4] > 1) stop("More than 1 sub-brick in the mask file!")
}
#if(!is.na(lop$maskFN))
# if(!all(dim(lop$maskData)==lop$myDim[1:3]))
# stop("Mask dimensions don't match the input files!")
return(lop)
}
# process.LME.opts(lop, 0)
#################################################################################
################# LME Computation functions ##############################
#################################################################################
# LME: bare-bone approach with LME for LME: some voxels may have 0 LME values: effect estimates as input only
# Per-voxel LME fit (effect estimates as input only).
#
# myData: this voxel's values across all input files; when a voxel-wise
#         covariate is in use, the second half of the vector carries the
#         covariate values (see the assVV2 call below)
# DM:     data frame of the model variables (a copy of lop$dataStr)
# tag:    unused in the body; kept for interface compatibility with the driver
#
# Uses the global option list `lop`.  Returns a numeric vector of length
# lop$NoBrick (plus per-observation residuals when -resid was requested):
# chi-square-converted F-statistics first, then (estimate, Z) pairs for each
# GLT, then one chi-square value per GLF.  All zeros are returned when the
# model cannot be fitted at this voxel.
runLME <- function(myData, DM, tag) {
#browser()
Stat <- rep(0, lop$NoBrick)
if(!is.null(lop$resid)) {
resid <- rep(0, length(myData))
res.na <- which(is.na(myData)) # indices for missing data
}
if(any(!is.na(lop$vQV))) { # voxel-wise centering for voxel-wise covariate
DM <- assVV2(DM, lop$vQV, myData[(length(myData)/2+1):length(myData)], all(is.na(lop$vVarCenters)))
}
if(!all(myData == 0)) { # skip voxels that are all zero (e.g., outside the mask)
DM$yy <- myData[1:nrow(DM)] # response column referenced by lop$model
fm <- NULL
options(warn=-1) # silence lmer convergence warnings
try(fm <- lmer(lop$model, data=DM), silent=TRUE)
if(!is.null(fm)) {
#Stat[1:lop$nF] <- anova(fm)$`F value` # F-stat
Stat[1:lop$nF] <- qchisq(anova(fm)$`Pr(>F)`, 2, lower.tail = F) # convert to chisq
#qnorm(anova(fm)$`Pr(>F)`/2, lower.tail = F) # Z-stat: should use one-tailed!
# general linear tests: NA in gltList[[ii]] flags "no custom contrast",
# i.e., test only the slope/covariate specification
if(lop$num_glt > 0) for(ii in 1:lop$num_glt) {
tt <- NULL
if(is.na(lop$gltList[[ii]])) tt <- tryCatch(testInteractions(fm, pairwise=NULL, slope=lop$slpList[[ii]],
covariates=lop$covValList[[ii]], adjustment="none"), error=function(e) NULL) else
tt <- tryCatch(testInteractions(fm, custom=lop$gltList[[ii]], slope=lop$slpList[[ii]],
covariates=lop$covValList[[ii]], adjustment="none"), error=function(e) NULL)
if(!is.null(tt)) {
Stat[lop$nF[1]+2*ii-1] <- tt[1,'Value']
Stat[lop$nF[1]+2*ii] <- sign(tt[1,'Value'])*qnorm(tt[1,'Pr(>Chisq)']/2, lower.tail = F) # convert chisq to Z
}
}
# general linear F-tests via phia::testFactors; one value per test
if(lop$num_glf > 0) for(ii in 1:lop$num_glf) {
ff <- NULL
if(is.na(lop$glfList[[ii]])) ff <- tryCatch(testFactors(fm, pairwise=NULL,
slope=lop$slpListF[[ii]], covariates=lop$covValListF[[ii]],
adjustment="none")$terms$`(Intercept)`$test, error=function(e) NULL) else
ff <- tryCatch(testFactors(fm, levels=lop$glfList[[ii]],
slope=lop$slpListF[[ii]], covariates=lop$covValListF[[ii]],
adjustment="none")$terms$`(Intercept)`$test, error=function(e) NULL)
if(!is.null(ff)) {
Stat[lop$nF[1]+2*lop$num_glt+ii] <- qchisq(ff[2,'Pr(>Chisq)'], 2, lower.tail = F) # glf[2,2] # convert chisq to Z
}
}
if(!is.null(lop$resid)) {
resid <- unname(residuals(fm))
# NOTE(review): which() never returns NULL, so this guard is always TRUE;
# harmless, since the loop is a no-op when res.na is empty
if(!is.null(res.na)) for(aa in res.na) resid <- append(resid, 0, after=aa-1) # fill in missing data with 0s
}
}
}
if(!is.null(lop$resid)) Stat <- c(Stat, resid) # append residuals when requested
return(Stat)
}
# runLME(inData[30,30,30,], lop$dataStr, 0)
# for LME only, not TRR yet
# Assign a voxel-wise covariate into the model data frame, centered.
#
# DF:    data frame of the model variables
# vQV:   name of the voxel-wise covariate column to (over)write
# value: this voxel's covariate values, one per observation
# c:     centering control -- NA means center at the mean; otherwise the value
#        is passed straight to scale()'s `center` argument
#
# Returns DF with column vQV replaced by the centered numeric values.
# (LME use only; not wired up for TRR yet.)
assVV2 <- function(DF, vQV, value, c) {
  center.at <- if (is.na(c)) TRUE else c
  DF[, vQV] <- as.numeric(scale(value, center = center.at, scale = FALSE))
  return(DF)
}
# Per-voxel test-retest reliability (TRR) model: assumes 2 conditions and 2
# sessions, hence the two random-effect correlation terms appended at the end.
#
# myData: this voxel's values, one per observation (NAs allowed)
# DM:     data frame of the model variables (lop$model references column yy)
# tag:    unused in the body; kept for interface compatibility with the driver
#
# Uses the global option list `lop`.  Returns a vector of length lop$NoBrick:
# interleaved (estimate, Z) pairs for the fixed effects followed by the two
# random-effect correlations; all zeros when the fit fails or the data are
# entirely zero.
runTRR <- function(myData, DM, tag) { # assuming 2 conditions and 2 sessions
  out <- rep(0, lop$NoBrick)
  if (!all(myData[!is.na(myData)] == 0)) {
    DM$yy <- myData
    fm <- NULL
    options(warn = -1)  # silence lmer convergence warnings
    try(fm <- lmer(lop$model, data = DM), silent = TRUE)
    if (!is.null(fm)) {
      mm <- summary(fm)
      est <- mm$coefficients[, 'Estimate']
      # two-sided p-value converted to a signed Z-statistic
      zval <- sign(est) * abs(qnorm(mm$coefficients[, 'Pr(>|t|)'] / 2))
      # interleave estimate/Z pairs, then append the two correlations
      out <- c(c(rbind(est, zval)),
               attr(mm$varcor[[1]], "correlation")[1, 2],
               attr(mm$varcor[[2]], "correlation")[1, 2])
    }
  }
  return(out)
}
# runTRR(inData[30,30,30,], lop$dataStr, 0)
#################################################################################
########################## Begin LME main ######################################
#################################################################################
# ---------- Begin LME main: gather command-line (or debug) arguments ----------
if(!exists('.DBG_args')) {
args = (commandArgs(TRUE))
rfile <- first.in.path(sprintf('%s.R',ExecName))
# save only on -dbg_args 28 Apr 2016 [rickr]
if ( '-dbgArgs' %in% args ) {
try(save(args, rfile, file=".3dLMEr.dbg.AFNI.args", ascii = TRUE), silent=TRUE)
}
} else {
# a debug session left .DBG_args in the workspace: reuse it
note.AFNI("Using .DBG_args resident in workspace")
args <- .DBG_args
}
# no arguments: interactive/GUI mode, read the model spec from a file
if(!length(args)) {
BATCH_MODE <<- 0
cat(greeting.LME(),
"Use CNTL-C on Unix or ESC on GUI version of R to stop at any moment.\n",
sep='\n')
#browser()
# NOTE(review): args is empty on this branch, so modFile is always "model.txt"
if(length(args)<6) modFile <- "model.txt" else modFile <- args[6]
if (is.null(lop <- read.LME.opts.from.file(modFile, verb=0))) {
stop('Error parsing input from file!');
}
if(0) str(lop)
} else {
# batch mode: parse options from the command line, then post-process them
if(!exists('.DBG_args')) {
BATCH_MODE <<- 1
} else {
BATCH_MODE <<- 0
}
if(is.null(lop <- read.LME.opts.batch(args, verb = 0)))
stop('Error parsing input')
#str(lop);
if(is.null(lop <- process.LME.opts(lop, verb = lop$verb)))
stop('Error processing input')
}
#if(lop$verb > 1) {
#Too much output, big dump of header structs of input dsets..
# str(lop)
#}
########################################################################
# in case the user didn't put space around each colon (:), this
#lop$gltCode <- lapply(lop$gltCode, function(ss) unlist(strsplit(ss, split="(?=:)", perl=TRUE)))
# split the comma-separated -qVarCenters list into a numeric vector
if(!is.na(lop$qVarCenters)) lop$qVarCenters <- as.numeric(strsplit(as.character(lop$qVarCenters), '\\,')[[1]])
# effect coding leads to the same type III as SAS
options(contrasts = c("contr.sum", "contr.poly"))
# standardize the names for Y, ROI and subject
#names(lop$dataStr)[names(lop$dataStr)==lop$Subj] <- 'Subj'
names(lop$dataStr)[names(lop$dataStr)==lop$IF] <- 'InputFile'
# Maybe not list for these two, or yes?
#lop$dataStr$Subj <- as.factor(lop$dataStr$Subj)
lop$dataStr$InputFile <- as.character(lop$dataStr$InputFile)
#if(is.null(lop$Tstat)) lop$dataStr$Tstat <- as.character(lop$dataStr$Tstat)
# center quantitative variables on user-specified values or their means
# (the trailing 'else' pairs with the innermost 'if': centers given -> use them)
if(!lop$TRR)
if(any(!is.na(lop$qVars))) if(all(is.na(lop$qVarCenters)))
lop$dataStr[,lop$QV] <- scale(lop$dataStr[,lop$QV], center=TRUE, scale=F)[,,drop=T] else
lop$dataStr[,lop$QV] <- scale(lop$dataStr[,lop$QV], center=lop$qVarCenters, scale=F)[,,drop=T]
# print a summary of the data structure for the user to eyeball
cat('\n++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
cat('***** Summary information of data structure *****\n')
#nS <- levels(lop$dataStr$Subj) # total number of subjects
nF <- dim(lop$dataStr[1])[1] # number of input files
#cat(nS, 'subjects in total:', nS, '\n')
cat(nF, 'response values\n')
# report levels for each factor and centered values for each numeric column
if(dim(lop$dataStr)[2] > 3) for(ii in 3:(dim(lop$dataStr)[2]-1)) if(class(lop$dataStr[,ii]) == 'factor')
cat(nlevels(lop$dataStr[,ii]), 'levels for factor', names(lop$dataStr)[ii], ':',
levels(lop$dataStr[,ii]), '\n') else if(class(lop$dataStr[,ii]) == 'numeric' | class(lop$dataStr[,ii]) == 'matrix') # numeric doesn't work
cat(length(lop$dataStr[,ii]), 'centered values for numeric variable', names(lop$dataStr)[ii], ':', lop$dataStr[,ii], '\n')
#cat(lop$num_glt, 'post hoc tests\n')
cat('\nContingency tables of subject distributions among the categorical variables:\n\n')
cat('***** End of data structure information *****\n')
cat('++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n')
cat('Reading input files now...\n\n')
# Read in the 1st input file so that we have the dimension information
inData <- read.AFNI(lop$dataStr[1, lop$IF], verb=lop$verb, meth=lop$iometh, forcedset = TRUE)
dimx <- inData$dim[1]
dimy <- inData$dim[2]
dimz <- inData$dim[3]
# for writing output purpose
head <- inData
# Read in all input files
inData <- unlist(lapply(lapply(lop$dataStr[, lop$IF], read.AFNI, verb=lop$verb, meth=lop$iometh, forcedset = TRUE), '[[', 1))
tryCatch(dim(inData) <- c(dimx, dimy, dimz, nF), error=function(e)
errex.AFNI(c("At least one of the input files has different dimensions:\n",
"either (1) numbers of voxels along X, Y, Z axes are different across files;\n",
"or (2) some input files have more than one value per voxel.\n",
"Run \"3dinfo -header_line -prefix -same_grid -n4 *.HEAD\" in the directory where\n",
"the files are stored, and pinpoint out which file(s) is the trouble maker.\n",
"Replace *.HEAD with *.nii or something similar for other file formats.\n")))
cat('Reading input files for effect estimates: Done!\n\n')
# masking
if(!is.na(lop$maskFN)) {
#Mask <- read.AFNI(lop$maskFN, verb=lop$verb, meth=lop$iometh, forcedset = TRUE)$brk[,,,1]
if(!all(c(dimx, dimy, dimz)==dim(lop$maskData)[1:3])) stop("Mask dimensions don't match the input files!")
lop$maskData <- array(lop$maskData, dim=c(dimx, dimy, dimz))
inData <- array(apply(inData, 4, function(x) x*(abs(lop$maskData)>tolL)), dim=c(dimx,dimy,dimz,nF))
#if(!is.na(lop$dataStr$tStat)) inDataV <- array(apply(inDataV, 4, function(x) x*(abs(Mask)>tolL)), dim=c(dimx,dimy,dimz,nF))
}
# voxel-wise covariate files: for LME only; not TRR yet
# Read the per-observation covariate volumes and append them after the input
# data, so each voxel's vector is (data values, covariate values).
if(!is.na(lop$vQV)) {
# take grid dimensions (and the output template) from the first covariate file
tmpDat <- read.AFNI(as.character(unique(lop$dataStr[,lop$vQV[1]])[1]), verb=lop$verb, meth=lop$iometh, forcedset = TRUE)
dimx <- tmpDat$dim[1]
dimy <- tmpDat$dim[2]
dimz <- tmpDat$dim[3]
head <- tmpDat
#for(ii in lop$vQV)
#if(length(unique(lop$dataStr[,ii])) != nlevels(lop$dataStr$Subj))
# errex.AFNI(c("Error with voxel-wise covariate ", ii, ": Each subject is only\n",
# "allowed to have one volume; that is, the covariate has to be at the\n",
# "subject level.")) else { # currently consider one voxel-wise covariate only: may generalize later?
#vQV <- unlist(lapply(lapply(unique(lop$dataStr[,lop$vQV[1]]), read.AFNI, verb=lop$verb, meth=lop$iometh, forcedset = TRUE), '[[', 1))
# one covariate volume per row of the data table (only lop$vQV[1] is used)
vQV <- unlist(lapply(lapply(as.character(lop$dataStr[,lop$vQV[1]]), read.AFNI, verb=lop$verb, meth=lop$iometh, forcedset = TRUE), '[[', 1))
#dim(vQV) <- c(dimx, dimy, dimz, length(unique(lop$dataStr[,lop$vQV[1]])))
dim(vQV) <- c(dimx, dimy, dimz, length(lop$dataStr[,lop$vQV[1]]))
# concatenate: the 4th dimension now holds data bricks then covariate bricks
inData <- c(inData, vQV)
#dim(inData) <- c(dimx, dimy, dimz, lop$nVVars+lop$nSubj)
dim(inData) <- c(dimx, dimy, dimz, 2*length(lop$dataStr[,lop$vQV[1]]))
#}
} else vQV <- NULL
# try out a few voxels and see if the model is OK, and find out the number of F tests and DF's
# for t tests (and catch potential problems as well)
#ii<-dimx%/%3; jj<-dimy%/%3; kk<-dimz%/%3
###############################
# add a new column to store the voxel-wise filenames
if(any(!is.na(lop$vVars))) {
lop$dataStr <- cbind(lop$dataStr, lop$dataStr[, lop$vQV[1]])
names(lop$dataStr)[length(names(lop$dataStr))] <- paste(lop$vQV[1], '_fn', sep='')
lop$nVVars <- nlevels(lop$dataStr[,paste(lop$vQV[1], '_fn', sep='')])
}