% dnn_textures.bib
% Bibliography database; entries exported from Zotero.
% (Removed web-scrape residue: rendered line-number gutter and file-size banner.)
@article{rosenholtz_what_2011,
  author    = {Rosenholtz, Ruth},
  title     = {What Your Visual System Sees Where You Are Not Looking},
  journal   = {IS\&T/SPIE Electronic Imaging},
  year      = {2011},
  pages     = {786510--786514},
  timestamp = {2014-06-09T12:05:16Z},
  file      = {Rosenholtz_2011_IS&TSPIE Electronic Imaging.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2011/Rosenholtz_2011_IS&TSPIE Electronic Imaging.pdf:application/pdf}
}
@book{gelman_data_2007,
  address   = {New York, NY},
  title     = {Data {{Analysis}} Using Regression and Multilevel/Hierarchical Models},
  timestamp = {2015-01-19T11:08:58Z},
  publisher = {Cambridge University Press},
  author    = {Gelman, Andrew and Hill, Jennifer},
  year      = {2007},
  file      = {Gelman_Hill_2007.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2007/Gelman_Hill_2007.pdf:application/pdf}
}
@incollection{dakin_seeing_2014,
  title     = {Seeing Statistical Regularities: {{Texture}} and Pattern Perception},
  timestamp = {2014-06-09T11:46:39Z},
  booktitle = {Oxford {{Handbook}} of {{Perceptual Organization}}},
  author    = {Dakin, Steven},
  year      = {2014},
  keywords  = {texture,statistics,averaging,global processing,summary statistics,natural scenes},
  pages     = {1--19},
  file      = {Dakin_2014.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Dakin_2014.pdf:application/pdf}
}
@article{wickham_tidy_2014,
  title     = {Tidy {{Data}}},
  volume    = {59},
  issn      = {1548-7660},
  doi       = {10.18637/jss.v059.i10},
  timestamp = {2015-02-24T14:57:20Z},
  number    = {10},
  journal   = {Journal of Statistical Software},
  author    = {Wickham, Hadley},
  month     = sep,
  year      = {2014},
  pages     = {1--23},
  file      = {Wickham_2014_Journal of Statistical Software.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Wickham_2014_Journal of Statistical Software.pdf:application/pdf},
  day       = {12},
  coden     = {JSSOBK},
  bibdate   = {2014-05-09},
  accepted  = {2014-05-09},
  submitted = {2013-02-20}
}
@incollection{rosenholtz_texture_2014,
  title     = {Texture Perception},
  timestamp = {2014-06-09T12:05:17Z},
  booktitle = {Oxford {{Handbook}} of {{Perceptual Organization}}},
  publisher = {Oxford University Press},
  author    = {Rosenholtz, Ruth},
  year      = {2014},
  pages     = {1--24},
  file      = {Rosenholtz_2014.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Rosenholtz_2014.pdf:application/pdf}
}
@article{freeman_metamers_2011,
  title     = {Metamers of the Ventral Stream},
  volume    = {14},
  doi       = {10.1038/nn.2889},
  timestamp = {2014-06-09T11:49:31Z},
  number    = {9},
  journal   = {Nature Neuroscience},
  author    = {Freeman, Jeremy and Simoncelli, Eero P.},
  year      = {2011},
  pages     = {1195--1201},
  file      = {Freeman_Simoncelli_2011_Nature Neuroscience.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2011/Freeman_Simoncelli_2011_Nature Neuroscience.pdf:application/pdf;Freeman_Simoncelli_2011_Nature Neuroscience4.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2011/Freeman_Simoncelli_2011_Nature Neuroscience4.pdf:application/pdf}
}
@book{wickham_ggplot2_2009,
  address   = {New York},
  title     = {{ggplot2}: {{Elegant Graphics}} for {{Data Analysis}}},
  isbn      = {978-0-387-98140-6},
  timestamp = {2014-06-09T12:11:44Z},
  publisher = {Springer},
  author    = {Wickham, Hadley},
  year      = {2009}
}
@article{yamins_performanceoptimized_2014,
  author    = {Yamins, D. L. K. and Hong, H. and Cadieu, C. F. and Solomon, E. A. and Seibert, D. and DiCarlo, J. J.},
  title     = {Performance-Optimized Hierarchical Models Predict Neural Responses in Higher Visual Cortex},
  journal   = {Proceedings of the National Academy of Sciences},
  volume    = {111},
  number    = {23},
  pages     = {8619--8624},
  month     = jun,
  year      = {2014},
  issn      = {0027-8424, 1091-6490},
  doi       = {10.1073/pnas.1403112111},
  language  = {en},
  urldate   = {2014-06-20},
  timestamp = {2014-06-24T12:40:04Z},
  file      = {Yamins_etal_2014_Proceedings of the National Academy of Sciences.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Yamins_etal_2014_Proceedings of the National Academy of Sciences.pdf:application/pdf}
}
@inproceedings{freeman_functionally_2013,
  author    = {Freeman, J. and Ziemba, C. M. and Simoncelli, E. P. and Movshon, J. A.},
  title     = {Functionally Partitioning the Ventral Stream with Controlled Natural Stimuli},
  booktitle = {Annual {{Meeting}}, {{Neuroscience}}},
  publisher = {{Society for Neuroscience}},
  address   = {San Diego, CA},
  month     = nov,
  year      = {2013},
  timestamp = {2015-04-08T09:56:54Z},
  abstract  = {The visual areas of the primate cerebral cortex provide distinct representations of the visual world, each with a distinct function and topographic representation. Neurons in primary visual cortex respond selectively to orientation and spatial frequency, whereas neurons in inferotemporal and lateral occipital areas respond selectively to complex objects. But the areas in between, in particular V2 and V4, have been more difficult to differentiate on functional grounds. In previous work (Freeman, Ziemba et al., Nature Neurosci, 2013), we showed that, in both macaque and human, synthetic stimuli whose higher-order statistical properties emulated natural texture images yielded differential responses in V2, but not V1. We have now examined responses to these stimuli and to photographs of objects and scenes, to better distinguish the properties of V2 from those of the areas that provide its input and receive its output. We used fMRI to measure BOLD responses while human observers viewed rapidly presented images, while performing an attentionally-diverting task at fixation. We measured BOLD responses while presenting stimuli drawn from different stimulus classes in alternating blocks. We used three classes: photographs of scenes and objects, texture images matched to the higher-order statistics of these photographs (specifically, the spatially-averaged distribution and correlations of model V1 outputs, Portilla \& Simoncelli, 2000), and noise images matched only to the overall orientation and frequency content of the photographs. As we found previously, fMRI responses to textures were larger than those to noise in V2, V3, and to some extent V4, but not in V1, and not in inferotemporal and lateral occipital areas thought to be selective for complex objects. In contrast, responses to photographs were larger than those to noise in those higher areas, as well as in V2-V4, but not in V1. 
The differentiation between V1 and V2, however, was less robust than for the more controlled, homogenous textures. Finally, comparing responses to photographs and textures revealed differential activity in higher areas, but little or no differential responses in V1-V4. Together, these three comparisons offer a simple functional account of the visual cortical cascade. Whereas V1 encodes basic spectral properties, V2, V3, and to some extent V4 represent the higher-order statistics of textures. Downstream areas capture the kinds of global structures that are unique to images of natural scenes and objects.}
}
@article{kummerer_deepgaze_2016,
  title         = {{{DeepGaze II}}: {{Reading}} Fixations from Deep Features Trained on Object Recognition},
  timestamp     = {2017-02-05T13:25:39Z},
  journal       = {arXiv},
  author        = {K{\"u}mmerer, Matthias and Wallis, T.~S.~A. and Bethge, Matthias},
  month         = oct,
  year          = {2016},
  keywords      = {Computer Science - Computer Vision and Pattern Recognition,Quantitative Biology - Neurons and Cognition,Statistics - Applications},
  eprint        = {1610.01563},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CV},
  adsurl        = {http://adsabs.harvard.edu/abs/2016arXiv161001563K},
  adsnote       = {Provided by the SAO/NASA Astrophysics Data System}
}
@article{moscatelli_modeling_2012,
  title     = {Modeling Psychophysical Data at the Population-Level: The Generalized Linear Mixed Model},
  volume    = {12},
  doi       = {10.1167/12.11.26},
  abstract  = {In psychophysics, researchers usually apply a two-level model for the analysis of the behavior of the single subject and the population. This classical model has two main disadvantages. First, the second level of the analysis discards information on trial repetitions and subject-specific variability. Second, the model does not easily allow assessing the goodness of fit. As an alternative to this classical approach, here we propose the Generalized Linear Mixed Model (GLMM). The GLMM separately estimates the variability of fixed and random effects, it has a higher statistical power, and it allows an easier assessment of the goodness of fit compared with the classical two-level model. GLMMs have been frequently used in many disciplines since the 1990s; however, they have been rarely applied in psychophysics. Furthermore, to our knowledge, the issue of estimating the point-of-subjective-equivalence (PSE) within the GLMM framework has never been addressed. Therefore the article has two purposes: It provides a brief introduction to the usage of the GLMM in psychophysics, and it evaluates two different methods to estimate the PSE and its variability within the GLMM framework. We compare the performance of the GLMM and the classical two-level model on published experimental data and simulated data. We report that the estimated values of the parameters were similar between the two models and Type I errors were below the confidence level in both models. However, the GLMM has a higher statistical power than the two-level model. Moreover, one can easily compare the fit of different GLMMs according to different criteria. In conclusion, we argue that the GLMM can be a useful method in psychophysics.},
  timestamp = {2014-06-09T12:01:04Z},
  number    = {11},
  journal   = {Journal of Vision},
  author    = {Moscatelli, Alessandro and Mezzetti, Maura and Lacquaniti, Francesco},
  year      = {2012},
  file      = {Moscatelli_etal_2012_Journal of Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2012/Moscatelli_etal_2012_Journal of Vision.pdf:application/pdf}
}
@incollection{xie_knitr_2013,
  title     = {Knitr: {{A}} Comprehensive Tool for Reproducible Research in {{R}}},
  isbn      = {978-1-4665-6159-5},
  timestamp = {2014-06-09T12:12:21Z},
  booktitle = {Implementing {{Reproducible Computational Research}}},
  publisher = {Chapman \& Hall/CRC},
  author    = {Xie, Yihui},
  editor    = {Stodden, Victoria and Leisch, Friedrich and Peng, Roger D.},
  year      = {2013}
}
@book{knoblauch_modeling_2012,
  address   = {New York},
  title     = {Modeling {{Psychophysical Data}} in {{R}}},
  isbn      = {978-1-4614-4474-9},
  abstract  = {Many of the commonly used methods for modeling and fitting psychophysical data are special cases of statistical procedures of great power and generality, notably the Generalized Linear Model (GLM). This book illustrates how to fit data from a variety of psychophysical paradigms using modern statistical methods and the statistical language R. The paradigms include signal detection theory, psychometric function fitting, classification images and more. In two chapters, recently developed methods for scaling appearance, maximum likelihood difference scaling and maximum likelihood conjoint measurement are examined. The authors also consider the application of mixed-effects models to psychophysical data.R is an open-source programming language that is widely used by statisticians and is seeing enormous growth in its application to data in all fields. It is interactive, containing many powerful facilities for optimization, model evaluation, model selection, and graphical display of data. The reader who fits data in R can readily make use of these methods. The researcher who uses R to fit and model his data has access to most recently developed statistical methods.This book does not assume that the reader is familiar with R, and a little experience with any programming language is all that is needed to appreciate this book. There are large numbers of examples of R in the text and the source code for all examples is available in an R package MPDiR available through R. Kenneth Knoblauch is a researcher in the Department of Integrative Neurosciences in Inserm Unit 846, The Stem Cell and Brain Research Institute and associated with the University Claude Bernard, Lyon 1, in France. Laurence T. Maloney is Professor of Psychology and Neural Science at New York University. His research focusses on applications of mathematical models to perception, motor control and decision making.},
  timestamp = {2014-07-22T13:45:51Z},
  publisher = {Springer},
  author    = {Knoblauch, Kenneth and Maloney, Laurence T.},
  year      = {2012}
}
@article{imagenet_2015,
  title     = {{{ImageNet Large Scale Visual Recognition Challenge}}},
  volume    = {115},
  doi       = {10.1007/s11263-015-0816-y},
  timestamp = {2016-06-16T11:56:12Z},
  number    = {3},
  journal   = {International Journal of Computer Vision},
  author    = {Russakovsky, Olga and Deng, Jia and Su, Hao and Krause, Jonathan and Satheesh, Sanjeev and Ma, Sean and Huang, Zhiheng and Karpathy, Andrej and Khosla, Aditya and Bernstein, Michael and Berg, Alexander C. and Fei-Fei, Li},
  year      = {2015},
  pages     = {211--252}
}
@article{hoffman_nouturn_2014,
  author    = {Hoffman, Matthew D. and Gelman, Andrew},
  title     = {The {{No}}-{{U}}-{{Turn Sampler}}: {{Adaptively Setting Path Lengths}} in {{Hamiltonian Monte Carlo}}},
  journal   = {Journal of Machine Learning Research},
  volume    = {15},
  number    = {Apr},
  pages     = {1593--1623},
  year      = {2014},
  timestamp = {2014-09-06T09:30:55Z},
  file      = {Hoffman_Gelman_Journal of Machine Learning Research2.pdf:/Users/tsawallis/Dropbox/Zotero_Library/Hoffman_Gelman_Journal of Machine Learning Research2.pdf:application/pdf}
}
@inproceedings{gatys_texture_2015-1,
  author    = {Gatys, L. A. and Ecker, A. S. and Bethge, M.},
  title     = {Texture {{Synthesis Using Convolutional Neural Networks}}},
  booktitle = {Advances in {{Neural Information Processing Systems}} 28},
  month     = may,
  year      = {2015},
  keywords  = {convolutional neural networks,deep learning,texture synthesis,ventral stream},
  timestamp = {2016-05-03T14:50:04Z},
  file      = {Gatys_etal_2015_Advances in Neural Information Processing Systems 28.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Gatys_etal_2015_Advances in Neural Information Processing Systems 28.pdf:application/pdf}
}
@book{jones_scipy_2001,
  title     = {{{SciPy}}: {{Open}} Source Scientific Tools for {{Python}}},
  timestamp = {2016-10-07T12:53:07Z},
  author    = {Jones, Eric and Oliphant, Travis and Peterson, Pearu},
  year      = {2001},
  url       = {http://www.scipy.org/},
  note      = {[Online; accessed 2014-09-12]}
}
@article{balas_texture_2006,
  title     = {Texture Synthesis and Perception: Using Computational Models to Study Texture Representations in the Human Visual System},
  volume    = {46},
  issn      = {0042-6989},
  doi       = {10.1016/j.visres.2005.04.013},
  abstract  = {Traditionally, texture perception has been studied using artificial textures made of random dots or repeated shapes. At the same time, computer algorithms for natural texture synthesis have improved dramatically. We seek to unify these two fields through a psychophysical assessment of a particular computational model, providing insight into which statistics are most vital for natural texture perception. We employ Portilla and Simoncelli's texture synthesis algorithm, a parametric model that mimics computations carried out in human vision. We find an intriguing interaction between texture type (periodic, structured, or 3-D textures) and image statistics (autocorrelation function and filter magnitude correlations), suggesting different representations may be employed for these texture families under pre-attentive viewing.},
  timestamp = {2014-06-09T10:53:16Z},
  number    = {3},
  journal   = {Vision Research},
  author    = {Balas, Benjamin J.},
  month     = feb,
  year      = {2006},
  keywords  = {Adult,Algorithms,Analysis of Variance,Computer Simulation,Contrast Sensitivity,Contrast Sensitivity: physiology,Humans,Models,Psychological,Psychological Tests,Psychophysics},
  pages     = {299--309},
  file      = {Balas_2006_Vision research.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2006/Balas_2006_Vision research.pdf:application/pdf}
}
@article{wilkinson_lateral_1997,
  title     = {Lateral Interactions in Peripherally Viewed Texture Arrays},
  volume    = {14},
  abstract  = {A horizontal array of vertically oriented Gabor elements was used to examine lateral masking in the near periphery (1.9$^\circ$\textendash{}5.7$^\circ$ eccentricity). Thresholds were assessed for detecting changes in the contrast, the spatial frequency, and the orientation of the central ...},
  timestamp = {2014-06-09T12:11:53Z},
  number    = {9},
  journal   = {Journal of the Optical Society of America A},
  author    = {Wilkinson, Frances and Wilson, Hugh R. and Ellemberg, Dave},
  year      = {1997},
  pages     = {2057--2068},
  file      = {Wilkinson_etal_1997_Journal of the Optical Society of America A.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1997/Wilkinson_etal_1997_Journal of the Optical Society of America A.pdf:application/pdf}
}
@article{cheung_nonlinear_2008,
  title     = {Nonlinear Mixed-Effects Modeling of {{MNREAD}} Data},
  volume    = {49},
  abstract  = {PURPOSE:It is often difficult to estimate parameters from individual clinical data because of noisy or incomplete measurements. Nonlinear mixed-effects (NLME) modeling provides a statistical framework for analyzing population parameters and the associated variations, even when individual data sets are incomplete. The authors demonstrate the application of NLME by analyzing data from the MNREAD, a continuous-text reading-acuity chart. METHODS:The authors analyzed MNREAD data (measurements of reading speed vs. print size) for two groups: 42 adult observers with normal vision and 14 patients with age-related macular degeneration (AMD). Truncated sets of MNREAD data were generated from the individual observers with normal vision. The MNREAD data were fitted with a two-limb function and an exponential-decay function using an individual curve-fitting approach and an NLME modeling approach. RESULTS:The exponential-decay function provided slightly better fits than the two-limb function. When the parameter estimates from the truncated data sets were used to predict the missing data, NLME modeling gave better predictions than individual fitting. NLME modeling gave reasonable parameter estimates for AMD patients even when individual fitting returned unrealistic estimates. CONCLUSIONS:These analyses showed that (1) an exponential-decay function fits MNREAD data very well, (2) NLME modeling provides a statistical framework for analyzing MNREAD data, and (3) NLME analysis provides a way of estimating MNREAD parameters even for incomplete data sets. The present results demonstrate the potential value of NLME modeling for clinical vision data.},
  timestamp = {2014-06-09T11:23:34Z},
  number    = {2},
  journal   = {Investigative Ophthalmology \& Visual Science},
  author    = {Cheung, Sing-Hang and Kallie, Christopher S. and Legge, Gordon E. and Cheong, Allen M. Y.},
  year      = {2008},
  pages     = {828--835},
  file      = {Cheung_etal_2008_Investigative Ophthalmology & Visual Science.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2008/Cheung_etal_2008_Investigative Ophthalmology & Visual Science.pdf:application/pdf}
}
@article{gelman_prior_2006,
  title     = {Prior Distributions for Variance Parameters in Hierarchical Models},
  volume    = {1},
  timestamp = {2014-06-09T11:50:14Z},
  number    = {3},
  journal   = {Bayesian Analysis},
  author    = {Gelman, Andrew},
  year      = {2006},
  pages     = {515--533},
  file      = {Gelman_2006_Bayesian Analysis.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2006/Gelman_2006_Bayesian Analysis.pdf:application/pdf}
}
@article{freeman_functional_2013,
  title     = {A Functional and Perceptual Signature of the Second Visual Area in Primates},
  volume    = {16},
  doi       = {10.1038/nn.3402},
  abstract  = {There is no generally accepted account of the function of the second visual cortical area (V2), partly because no simple response properties robustly distinguish V2 neurons from those in primary visual cortex (V1). We constructed synthetic stimuli replicating the higher-order statistical dependencies found in natural texture images and used them to stimulate macaque V1 and V2 neurons. Most V2 cells responded more vigorously to these textures than to control stimuli lacking naturalistic structure; V1 cells did not. Functional magnetic resonance imaging (fMRI) measurements in humans revealed differences between V1 and V2 that paralleled the neuronal measurements. The ability of human observers to detect naturalistic structure in different types of texture was well predicted by the strength of neuronal and fMRI responses in V2 but not in V1. Together, these results reveal a particular functional role for V2 in the representation of natural image structure.},
  timestamp = {2014-06-09T11:49:33Z},
  number    = {7},
  journal   = {Nature Neuroscience},
  author    = {Freeman, Jeremy and Ziemba, Corey M. and Heeger, David J. and Simoncelli, Eero P. and Movshon, J. Anthony},
  year      = {2013},
  pages     = {974--981},
  file      = {Freeman_etal_2013_Nature Neuroscience.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2013/Freeman_etal_2013_Nature Neuroscience.pdf:application/pdf}
}
@book{kruschke_doing_2011,
  address   = {Burlington MA},
  title     = {Doing {{Bayesian Data Analysis}}},
  isbn      = {978-0-12-381485-2},
  timestamp = {2015-05-26T13:13:44Z},
  publisher = {Academic Press / Elsevier},
  author    = {Kruschke, John K.},
  year      = {2011},
  file      = {Kruschke_2011.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2011/Kruschke_2011.pdf:application/pdf}
}
@article{balas_attentive_2008,
  author    = {Balas, Benjamin J},
  title     = {Attentive Texture Similarity as a Categorization Task: {{Comparing}} Texture Synthesis Models},
  journal   = {Pattern Recognition},
  volume    = {41},
  number    = {3},
  pages     = {972--982},
  year      = {2008},
  timestamp = {2014-06-09T10:53:23Z},
  file      = {Balas_2008_Pattern Recognition.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2008/Balas_2008_Pattern Recognition.pdf:application/pdf}
}
@incollection{adelson_plenoptic_1991,
  title     = {The Plenoptic Function and the Elements of Early Vision},
  timestamp = {2014-06-09T07:56:20Z},
  booktitle = {Computational Models of Visual Processing},
  publisher = {MIT Press},
  address   = {Cambridge, MA},
  author    = {Adelson, Edward H. and Bergen, James R.},
  editor    = {Landy, Michael S. and Movshon, J. Anthony},
  year      = {1991},
  pages     = {3--20},
  file      = {Adelson_Bergen_1991_Computational models of visual processing.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1991/Adelson_Bergen_1991_Computational models of visual processing.pdf:application/pdf;Adelson_Bergen_1991_Computational models of visual processing3.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1991/Adelson_Bergen_1991_Computational models of visual processing3.pdf:application/pdf}
}
@article{gelman_inference_1992,
  title     = {Inference from Iterative Simulation Using Multiple Sequences},
  volume    = {7},
  timestamp = {2014-06-09T11:50:19Z},
  number    = {4},
  journal   = {Statistical Science},
  author    = {Gelman, Andrew and Rubin, Donald B.},
  year      = {1992},
  pages     = {457--472},
  file      = {Gelman_Rubin_1992_Statistical Science.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1992/Gelman_Rubin_1992_Statistical Science.pdf:application/pdf}
}
@article{rosenholtz_summary_2012,
  title     = {A Summary Statistic Representation in Peripheral Vision Explains Visual Search},
  volume    = {12},
  doi       = {10.1167/12.4.14},
  abstract  = {Vision is an active process: We repeatedly move our eyes to seek out objects of interest and explore our environment. Visual search experiments capture aspects of this process, by having subjects look for a target within a background of distractors. Search speed often correlates with target-distractor discriminability; search is faster when the target and distractors look quite different. However, there are notable exceptions. A given discriminability can yield efficient searches (where the target seems to "pop-out") as well as inefficient ones (where additional distractors make search significantly slower and more difficult). Search is often more difficult when finding the target requires distinguishing a particular configuration or conjunction of features. Search asymmetries abound. These puzzling results have fueled three decades of theoretical and experimental studies. We argue that the key issue in search is the processing of image patches in the periphery, where visual representation is characterized by summary statistics computed over a sizable pooling region. By quantifying these statistics, we predict a set of classic search results, as well as peripheral discriminability of crowded patches such as those found in search displays.},
  timestamp = {2014-06-09T12:05:19Z},
  number    = {4},
  journal   = {Journal of Vision},
  author    = {Rosenholtz, Ruth and Huang, Jie and Raj, Alvin and Balas, Benjamin J and Ilie, Livia},
  year      = {2012},
  file      = {Rosenholtz_etal_2012_Journal of Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2012/Rosenholtz_etal_2012_Journal of Vision.pdf:application/pdf}
}
@article{ho_conjoint_2008,
  author    = {Ho, Yun-Xian and Landy, Michael S and Maloney, Laurence T},
  title     = {Conjoint Measurement of Gloss and Surface Texture},
  journal   = {Psychological Science},
  volume    = {19},
  number    = {2},
  pages     = {196--204},
  year      = {2008},
  timestamp = {2014-06-09T11:52:48Z},
  file      = {Ho_etal_2008_Psychological Science.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2008/Ho_etal_2008_Psychological Science.pdf:application/pdf}
}
@inproceedings{yamins_hierarchical_2013,
  title     = {Hierarchical {{Modular Optimization}} of {{Convolutional Networks Achieves Representations Similar}} to {{Macaque IT}} and {{Human Ventral Stream}}},
  timestamp = {2016-09-07T12:01:18Z},
  booktitle = {Advances in {{Neural Information Processing Systems}}},
  author    = {Yamins, Daniel L. K. and Hong, Ha and Cadieu, Charles F. and DiCarlo, James J.},
  year      = {2013}
}
@article{brainard_psychophysics_1997,
  author    = {Brainard, David H},
  title     = {The {{Psychophysics Toolbox}}},
  journal   = {Spatial Vision},
  volume    = {10},
  number    = {4},
  pages     = {433--436},
  year      = {1997},
  abstract  = {The Psychophysics Toolbox is a software package that supports visual psychophysics. Its routines provide an interface between a high-level interpreted language (MATLAB on the Macintosh) and the video display hardware. A set of example programs is included with the Toolbox distribution.},
  timestamp = {2014-06-09T11:11:43Z},
  annote    = {9176952},
  file      = {Brainard_1997_Spatial Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1997/Brainard_1997_Spatial Vision.pdf:application/pdf}
}
@article{kingdom_sensitivity_2001,
  title     = {Sensitivity to Contrast Histogram Differences in Synthetic Wavelet-Textures},
  volume    = {41},
  abstract  = {Recent research on texture synthesis suggests that characterisation of those properties of textures to which human observers are sensitive may be provided by the histograms of the coefficients of a wavelet decomposition. In this study we examined the properties of wavelet histograms that affect texture discrimination by measuring observer sensitivity to differences in the wavelet histograms of synthetic textures. The textures, generated via Gabor micropattern synthesis, were broadband, with amplitude spectra that are characteristic of natural images, i.e. 1/f. We measured texture-difference thresholds for three moments of the wavelet histograms \textendash{} variance, skew and kurtosis \textendash{} by manipulating the contrast, phase, and density, of the Gabor elements used to construct the textures. Observers discriminated more efficiently between textures that had differences in kurtosis, than between textures that had differences in either variance or skew. Performance was compared to two model observers; one used the pixel-luminance histogram, the other used the histogram of the output of wavelet-filters. The results support the idea that the visual system is relatively sensitive to the kurtosis, or 4th moment, of the wavelet histogram of textures. We argue that higher than 4th-order moments will, in practice, become increasingly difficult for the visual system to represent because the lack of a perfect match between the elements and the receptive fields effectively blurs the response histogram, thereby attenuating higher moments.},
  timestamp = {2014-06-09T11:55:26Z},
  number    = {5},
  journal   = {Vision Research},
  author    = {Kingdom, F. A. and Hayes, A. and Field, D. J.},
  year      = {2001},
  pages     = {585--598},
  file      = {Kingdom_etal_2001_Vision Research.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2001/Kingdom_etal_2001_Vision Research.pdf:application/pdf}
}
@article{thaler_what_2013,
title = {What Is the Best Fixation Target? {{The}} Effect of Target Shape on Stability of Fixational Eye Movements},
volume = {76},
doi = {10.1016/j.visres.2012.10.012},
timestamp = {2016-02-18T10:56:05Z},
journal = {Vision Research},
author = {Thaler, L and Sch{\"u}tz, A C and Goodale, M A and Gegenfurtner, K R},
year = {2013},
pages = {31--42},
file = {Thaler_etal_2013_Vision Research.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2013/Thaler_etal_2013_Vision Research.pdf:application/pdf}
}
@article{portilla_parametric_2000,
title = {A Parametric Texture Model Based on Joint Statistics of Complex Wavelet Coefficients},
volume = {40},
timestamp = {2014-06-09T12:04:02Z},
number = {1},
journal = {International Journal of Computer Vision},
author = {Portilla, J and Simoncelli, Eero P},
year = {2000},
pages = {49--70},
annote = {11620918327101739510related:9tG58qXLRaEJ},
file = {Portilla_Simoncelli_2000_International Journal of Computer Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2000/Portilla_Simoncelli_2000_International Journal of Computer Vision.pdf:application/pdf}
}
@article{arsenault_higher_2011,
title = {Higher Order Texture Statistics Impair Contrast Boundary Segmentation.},
volume = {11},
abstract = {Texture boundary segmentation is conventionally thought to be mediated by global differences in Fourier energy, i.e., low-order texture statistics. Here, we have examined the importance of higher order statistical structure of textures in a simple second-order segmentation task. We measured modulation depth thresholds for contrast boundaries imposed on texture samples extracted from natural scene photographs, using forced-choice judgments of boundary orientation (left vs. right oblique). We compared segmentation thresholds for contrast boundaries whose constituent textures were either intact or phase scrambled. In the intact condition, all the texture statistics were preserved, while in the phase-scrambled condition the higher order statistics of the same texture were randomized, but the lower order statistics were unchanged. We found that (1) contrast boundary segmentation is impaired by the presence of higher order statistics; (2) every texture shows impairment but some substantially more than others; and (3) our findings are not related to scrambling-induced changes in detectability. The magnitude of phase-scrambling effect for individual textures was uncorrelated with variations in their amplitude spectra, but instead we suggest that it might be related to differences in local edge structure or sparseness.},
timestamp = {2014-06-09T08:41:49Z},
number = {10},
journal = {Journal of Vision},
author = {Arsenault, Elizabeth and Yoonessi, Ahmad and Baker, Curtis L},
year = {2011},
pages = {14},
file = {Arsenault_etal_2011_Journal of Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2011/Arsenault_etal_2011_Journal of Vision.pdf:application/pdf}
}
@article{balas_summary-statistic_2009,
title = {A Summary-Statistic Representation in Peripheral Vision Explains Visual Crowding},
volume = {9},
abstract = {Peripheral vision provides a less faithful representation of the visual input than foveal vision. Nonetheless, we can gain a lot of information about the world from our peripheral vision, for example in order to plan eye movements. The phenomenon of crowding shows that the reduction of information available in the periphery is not merely the result of reduced resolution. Crowding refers to visual phenomena in which identification of a target stimulus is significantly impaired by the presence of nearby stimuli, or flankers. What information is available in the periphery? We propose that the visual system locally represents peripheral stimuli by the joint statistics of responses of cells sensitive to different position, phase, orientation, and scale. This "textural" representation by summary statistics predicts the subjective "jumble" of features often associated with crowding. We show that the difficulty of performing an identification task within a single pooling region using this representation of the stimuli is correlated with peripheral identification performance under conditions of crowding. Furthermore, for a simple stimulus with no flankers, this representation can be adequate to specify the stimulus with some position invariance. This provides evidence that a unified neuronal mechanism may underlie peripheral vision, ordinary pattern recognition in central vision, and texture perception. A key component of our methodology involves creating visualizations of the information available in the summary statistics of a stimulus. We call these visualizations "mongrels" and show that they are highly useful in examining how the early visual system represents the visual input. Mongrels enable one to study the "equivalence classes" of our model, i.e., the sets of stimuli that map to the same representation according to the model.},
timestamp = {2014-06-09T10:53:30Z},
number = {12},
journal = {Journal of Vision},
author = {Balas, Benjamin J and Nakano, L and Rosenholtz, R},
year = {2009},
pages = {13},
annote = {http://www.ncbi.nlm.nih.gov/pubmed/20053104},
file = {Balas_etal_2009_Journal of Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2009/Balas_etal_2009_Journal of Vision.pdf:application/pdf}
}
@article{fleming_visual_2014,
title = {Visual Perception of Materials and Their Properties},
volume = {94},
doi = {10.1016/j.visres.2013.11.004},
timestamp = {2014-06-09T11:49:09Z},
number = {C},
journal = {Vision Research},
author = {Fleming, Roland W.},
year = {2014},
pages = {62--75},
file = {Fleming_2014_Vision research.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Fleming_2014_Vision research.pdf:application/pdf}
}
@book{r_core_development_team_r:_2016,
address = {Vienna, Austria},
title = {R: {{A Language}} and {{Environment}} for {{Statistical Computing}}},
timestamp = {2017-03-30T09:22:38Z},
publisher = {{R Foundation for Statistical Computing}},
author = {{R Core Development Team}},
year = {2016}
}
@book{macmillan_detection_2005,
address = {Mahwah, NJ},
title = {Detection {{Theory}}: {{A User}}'s {{Guide}}},
isbn = {0-8058-4230-6},
timestamp = {2015-03-04T12:25:54Z},
publisher = {{Lawrence Erlbaum}},
author = {Macmillan, N A and Creelman, C D},
year = {2005}
}
@article{malik_preattentive_1990,
title = {Preattentive Texture Discrimination with Early Vision Mechanisms.},
volume = {7},
abstract = {We present a model of human preattentive texture perception. This model consists of three stages: (1) convolution of the image with a bank of even-symmetric linear filters followed by half-wave rectification to give a set of responses modeling outputs of V1 simple cells, (2) inhibition, localized in space, within and among the neural-response profiles that results in the suppression of weak responses when there are strong responses at the same or nearby locations, and (3) texture-boundary detection by using wide odd-symmetric mechanisms. Our model can predict the salience of texture boundaries in any arbitrary gray-scale image. A computer implementation of this model has been tested on many of the classic stimuli from psychophysical literature. Quantitative predictions of the degree of discriminability of different texture pairs match well with experimental measurements of discriminability in human observers.},
timestamp = {2014-06-09T11:59:10Z},
number = {5},
journal = {Journal of the Optical Society of America A},
author = {Malik, J and Perona, P},
year = {1990},
pages = {923--932},
file = {Malik_Perona_1990_Journal of the Optical Society of America A.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1990/Malik_Perona_1990_Journal of the Optical Society of America A.pdf:application/pdf}
}
@article{donahue_decaf_2013,
title = {{DeCAF}: {{A}} Deep Convolutional Activation Feature for Generic Visual Recognition},
timestamp = {2014-11-03T15:33:10Z},
journal = {arXiv},
author = {Donahue, J and Jia, Y and Vinyals, O},
year = {2013},
pages = {1--10},
file = {Donahue_etal_2013_arXiv.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2013/Donahue_etal_2013_arXiv.pdf:application/pdf}
}
@article{wickham_splitapplycombine_2011,
title = {The {{Split}}-{{Apply}}-{{Combine Strategy}} for {{Data Analysis}}},
volume = {40},
timestamp = {2014-06-09T12:11:45Z},
number = {1},
journal = {Journal of Statistical Software},
author = {Wickham, Hadley},
year = {2011},
pages = {1--29}
}
@article{kleiner_whats_2007,
title = {What's New in {{Psychtoolbox}}-3?},
volume = {36},
timestamp = {2014-06-09T11:55:31Z},
number = {ECVP Abstract Supplement},
journal = {Perception},
author = {Kleiner, M and Brainard, David H and Pelli, Denis G},
year = {2007}
}
@article{pelli_videotoolbox_1997,
title = {The {{VideoToolbox}} Software for Visual Psychophysics: Transforming Numbers into Movies.},
volume = {10},
abstract = {The VideoToolbox is a free collection of two hundred C subroutines for Macintosh computers that calibrates and controls the computer-display interface to create accurately specified visual stimuli. High-level platform-independent languages like MATLAB are best for creating the numbers that describe the desired images. Low-level, computer-specific VideoToolbox routines control the hardware that transforms those numbers into a movie. Transcending the particular computer and language, we discuss the nature of the computer-display interface, and how to calibrate and control it.},
timestamp = {2014-06-09T12:03:05Z},
number = {4},
journal = {Spatial Vision},
author = {Pelli, Denis G},
year = {1997},
pages = {437--442},
file = {Pelli_1997_Spatial Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1997/Pelli_1997_Spatial Vision.pdf:application/pdf}
}
@article{balas_contrast_2012,
title = {Contrast Negation and Texture Synthesis Differentially Disrupt Natural Texture Appearance.},
volume = {3},
issn = {1664-1078},
doi = {10.3389/fpsyg.2012.00515},
abstract = {Natural textures have characteristic image statistics that make them discriminable from unnatural textures. For example, both contrast negation and texture synthesis alter the appearance of natural textures even though each manipulation preserves some features while disrupting others. Here, we examined the extent to which contrast negation and texture synthesis each introduce or remove critical perceptual features for discriminating unnatural textures from natural textures. We find that both manipulations remove information that observers use for distinguishing natural textures from transformed versions of the same patterns, but do so in different ways. Texture synthesis removes information that is relevant for discrimination in both abstract patterns and ecologically valid textures, and we also observe a category-dependent asymmetry for identifying an "oddball" real texture among synthetic distractors. Contrast negation exhibits no such asymmetry, and also does not impact discrimination performance in abstract patterns. We discuss our results in the context of the visual system's tuning to ecologically relevant patterns and other results describing sensitivity to higher-order statistics in texture patterns.},
timestamp = {2016-02-24T10:16:23Z},
number = {November},
journal = {Frontiers in Psychology},
author = {Balas, Benjamin J},
month = jan,
year = {2012},
keywords = {image statistics,texture discrimination,texture synthesis},
pages = {515},
file = {Balas_2012_Frontiers in psychology.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2012/Balas_2012_Frontiers in psychology.pdf:application/pdf}
}
@article{rosenholtz_rethinking_2012,
title = {Rethinking the Role of Top-down Attention in Vision: Effects Attributable to a Lossy Representation in Peripheral Vision.},
volume = {3},
abstract = {According to common wisdom in the field of visual perception, top-down selective attention is required in order to bind features into objects. In this view, even simple tasks, such as distinguishing a rotated T from a rotated L, require selective attention since they require feature binding. Selective attention, in turn, is commonly conceived as involving volition, intention, and at least implicitly, awareness. There is something non-intuitive about the notion that we might need so expensive (and possibly human) a resource as conscious awareness in order to perform so basic a function as perception. In fact, we can carry out complex sensorimotor tasks, seemingly in the near absence of awareness or volitional shifts of attention ("zombie behaviors"). More generally, the tight association between attention and awareness, and the presumed role of attention on perception, is problematic. We propose that under normal viewing conditions, the main processes of feature binding and perception proceed largely independently of top-down selective attention. Recent work suggests that there is a significant loss of information in early stages of visual processing, especially in the periphery. In particular, our texture tiling model (TTM) represents images in terms of a fixed set of "texture" statistics computed over local pooling regions that tile the visual input. We argue that this lossy representation produces the perceptual ambiguities that have previously been as ascribed to a lack of feature binding in the absence of selective attention. At the same time, the TTM representation is sufficiently rich to explain performance in such complex tasks as scene gist recognition, pop-out target search, and navigation. A number of phenomena that have previously been explained in terms of voluntary attention can be explained more parsimoniously with the TTM. 
In this model, peripheral vision introduces a specific kind of information loss, and the information available to an observer varies greatly depending upon shifts of the point of gaze (which usually occur without awareness). The available information, in turn, provides a key determinant of the visual system's capabilities and deficiencies. This scheme dissociates basic perceptual operations, such as feature binding, from both top-down attention and conscious awareness.},
timestamp = {2014-06-09T12:05:18Z},
journal = {Frontiers in Psychology},
author = {Rosenholtz, Ruth and Huang, Jie and Ehinger, Krista A},
year = {2012},
pages = {13},
file = {Rosenholtz_etal_2012_Frontiers in Psychology.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2012/Rosenholtz_etal_2012_Frontiers in Psychology.pdf:application/pdf}
}
@article{szegedy_intriguing_2013,
title = {Intriguing Properties of Neural Networks},
timestamp = {2014-10-23T13:34:47Z},
urldate = {2014-10-23},
journal = {arXiv preprint arXiv:1312.6199},
author = {Szegedy, Christian and Zaremba, Wojciech and Sutskever, Ilya and Bruna, Joan and Erhan, Dumitru and Goodfellow, Ian and Fergus, Rob},
year = {2013},
file = {1312.6199.pdf:/Users/tsawallis/Dropbox/Zotero_Library/zotero/storage/4ZATEDKB/1312.6199.pdf:application/pdf}
}
@article{khaligh-razavi_deep_2014,
title = {Deep {{Supervised}}, but {{Not Unsupervised}}, {{Models May Explain IT Cortical Representation}}},
volume = {10},
issn = {1553-7358},
doi = {10.1371/journal.pcbi.1003915},
language = {en},
timestamp = {2014-11-07T13:50:40Z},
number = {11},
urldate = {2014-11-07},
journal = {PLoS Computational Biology},
author = {Khaligh-Razavi, Seyed-Mahdi and Kriegeskorte, Nikolaus},
editor = {Diedrichsen, J{\"o}rn},
month = nov,
year = {2014},
pages = {e1003915},
file = {Khaligh-Razavi_Kriegeskorte_2014_PLoS Computational Biology.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Khaligh-Razavi_Kriegeskorte_2014_PLoS Computational Biology.pdf:application/pdf}
}
@article{clarke_visual_2014,
title = {Visual Crowding Illustrates the Inadequacy of Local vs. Global and Feedforward vs. Feedback Distinctions in Modeling Visual Perception},
volume = {5},
issn = {1664-1078},
doi = {10.3389/fpsyg.2014.01193},
timestamp = {2014-11-11T12:32:37Z},
urldate = {2014-11-11},
journal = {Frontiers in Psychology},
author = {Clarke, Aaron M. and Herzog, Michael H. and Francis, Gregory},
month = oct,
year = {2014},
file = {Clarke_etal_2014_Frontiers in Psychology.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Clarke_etal_2014_Frontiers in Psychology.pdf:application/pdf}
}
@book{burnham_model_2002,
address = {New York},
title = {Model Selection and Multimodel Inference: A Practical Information-Theoretic Approach},
isbn = {0-387-22456-4 978-0-387-22456-5},
abstract = {This book is unique in that it covers the philosophy of model-based data analysis and a strategy for the analysis of empirical data. The book introduces information theoretic approaches and focuses critical attention on a priori modeling and the selection of a good approximating model that best represents the inference supported by the data. Kullback-Leibler Information represents a fundamental quantity in science and is Hirotugu Akaike's basis for model selection. The maximized log-likelihood function can be bias-corrected to provide an estimate of expected, relative Kullback-Leibler information. This leads to Akaike's Information Criterion (AIC) and various extensions. These are relatively simple and easy to use in practice. The information theoretic approaches provide a unified and rigorous theory, an extension of likelihood theory, an important application of information theory, and are objective and practical to employ across a very wide class of empirical problems. Model selection, under the information theoretic approach presented here, attempts to identify the (likely) best model, orders the models from best to worst, and measures the plausibility ("calibration") that each model is really the best as an inference. Model selection methods are extended to allow inference from more than a single "best" model. The book presents several new approaches to estimating model selection uncertainty and incorporating selection uncertainty into estimates of precision. An array of examples is given to illustrate various technical issues. This is an applied book written primarily for biologists and statisticians using models for making inferences from empirical data. People interested in the empirical sciences will find this material useful as it offers an alternative to hypothesis testing and Bayesian.},
language = {English},
timestamp = {2015-02-10T14:23:06Z},
urldate = {2015-02-10},
publisher = {{Springer}},
author = {Burnham, Kenneth P and Anderson, David Raymond},
year = {2002},
file = {Burnham_2002.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2002/Burnham_2002.pdf:application/pdf}
}
@article{okazawa_image_2015,
title = {Image Statistics Underlying Natural Texture Selectivity of Neurons in Macaque {{V4}}},
volume = {112},
issn = {0027-8424, 1091-6490},
doi = {10.1073/pnas.1415146112},
language = {en},
timestamp = {2015-04-22T21:03:14Z},
number = {4},
urldate = {2015-04-08},
journal = {Proceedings of the National Academy of Sciences},
author = {Okazawa, Gouki and Tajima, Satohiro and Komatsu, Hidehiko},
month = jan,
year = {2015},
pages = {E351--E360},
file = {Okazawa_etal_2015_Proceedings of the National Academy of Sciences.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Okazawa_etal_2015_Proceedings of the National Academy of Sciences.pdf:application/pdf;Okazawa_etal_2015_Proceedings of the National Academy of Sciences.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Okazawa_etal_2015_Proceedings of the National Academy of Sciences2.pdf:application/pdf}
}
@book{standevelopmentteam_stan_2015-1,
title = {Stan {{Modeling Language Users Guide}} and {{Reference Manual}}, {{Version}} 2.10.0},
timestamp = {2016-10-07T15:05:03Z},
author = {{Stan Development Team}},
year = {2015},
file = {Stan Development Team_2015.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Stan Development Team_2015.pdf:application/pdf}
}
@article{herzog_crowding_2015,
title = {Crowding, Grouping, and Object Recognition: {{A}} Matter of Appearance},
volume = {15},
shorttitle = {Crowding, Grouping, and Object Recognition},
timestamp = {2015-05-23T13:28:45Z},
number = {6},
urldate = {2015-05-23},
journal = {Journal of Vision},
author = {Herzog, Michael H. and Sayim, Bilge and Chicherov, Vitaly and Manassi, Mauro},
year = {2015},
file = {Herzog_etal_2015_Journal of Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Herzog_etal_2015_Journal of Vision.pdf:application/pdf}
}
@article{vanderwalt_scikitimage_2014,
title = {Scikit-Image: Image Processing in {{Python}}},
volume = {2},
issn = {2167-8359},
doi = {10.7717/peerj.453},
timestamp = {2015-09-23T12:10:16Z},
journal = {PeerJ},
author = {{van der Walt}, St{\'e}fan and Sch{\"o}nberger, Johannes L. and Nunez-Iglesias, Juan and Boulogne, Fran{\c c}ois and Warner, Joshua D. and Yager, Neil and Gouillart, Emmanuelle and Yu, Tony and {the scikit-image contributors}},
month = jun,
year = {2014},
keywords = {Education,Image Processing,Open source,Python,Reproducible research,Scientific programming,Visualization},
pages = {e453},
file = {van der Walt_etal_2014_PeerJ.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/van der Walt_etal_2014_PeerJ.pdf:application/pdf}
}
@article{lecun_deep_2015,
title = {Deep Learning},
volume = {521},
issn = {0028-0836, 1476-4687},
doi = {10.1038/nature14539},
timestamp = {2015-05-29T12:38:03Z},
number = {7553},
urldate = {2015-05-29},
journal = {Nature},
author = {LeCun, Yann and Bengio, Yoshua and Hinton, Geoffrey},
month = may,
year = {2015},
pages = {436--444},
file = {LeCun_etal_2015_Nature.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/LeCun_etal_2015_Nature.pdf:application/pdf}
}
@misc{standevelopmentteam_stan_2015,
title = {Stan: {{A C}}++ {{Library}} for {{Probability}} and {{Sampling}}, {{Version}} 2.14.0},
timestamp = {2017-03-30T09:24:23Z},
author = {{Stan Development Team}},
year = {2017}
}
@article{guclu_deep_2015,
title = {Deep {{Neural Networks Reveal}} a {{Gradient}} in the {{Complexity}} of {{Neural Representations}} across the {{Ventral Stream}}},
volume = {35},
issn = {0270-6474, 1529-2401},
doi = {10.1523/JNEUROSCI.5023-14.2015},
language = {en},
timestamp = {2015-07-10T08:17:45Z},
number = {27},
urldate = {2015-07-10},
journal = {Journal of Neuroscience},
author = {G{\"u}{\c c}l{\"u}, U. and {van Gerven}, M. A. J.},
month = jul,
year = {2015},
pages = {10005--10014},
file = {Guclu_van Gerven_2015_Journal of Neuroscience.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Guclu_van Gerven_2015_Journal of Neuroscience.pdf:application/pdf}
}
@inproceedings{kummerer_deep_2015,
title = {Deep {{Gaze I}}: {{Boosting Saliency Prediction}} with {{Feature Maps Trained}} on {{ImageNet}}},
timestamp = {2015-07-21T09:56:54Z},
booktitle = {{{ICLR Workshop}}},
author = {K{\"u}mmerer, M. and Theis, L. and Bethge, M.},
month = may,
year = {2015},
keywords = {deep learning,saliency},
file = {Kümmerer_etal_2015_ICLR Workshop.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Kümmerer_etal_2015_ICLR Workshop.pdf:application/pdf}
}
@article{wagenmakers_bayesian_2010,
title = {Bayesian Hypothesis Testing for Psychologists: {{A}} Tutorial on the {{Savage}}\textendash{}{{Dickey}} Method},
volume = {60},
issn = {00100285},
shorttitle = {Bayesian Hypothesis Testing for Psychologists},
doi = {10.1016/j.cogpsych.2009.12.001},
language = {en},
timestamp = {2015-09-04T07:32:23Z},
number = {3},
urldate = {2015-09-04},
journal = {Cognitive Psychology},
author = {Wagenmakers, Eric-Jan and Lodewyckx, Tom and Kuriyal, Himanshu and Grasman, Raoul},
month = may,
year = {2010},
pages = {158--189},
file = {Wagenmakers_etal_2010_Cognitive Psychology.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2010/Wagenmakers_etal_2010_Cognitive Psychology.pdf:application/pdf}
}
@article{cadieu_deep_2014,
title = {Deep {{Neural Networks Rival}} the {{Representation}} of {{Primate IT Cortex}} for {{Core Visual Object Recognition}}},
volume = {10},
issn = {1553-7358},
doi = {10.1371/journal.pcbi.1003963},
language = {en},
timestamp = {2015-09-09T11:20:05Z},
number = {12},
urldate = {2015-09-09},
journal = {PLoS Computational Biology},
author = {Cadieu, Charles F. and Hong, Ha and Yamins, Daniel L. K. and Pinto, Nicolas and Ardila, Diego and Solomon, Ethan A. and Majaj, Najib J. and DiCarlo, James J.},
editor = {Bethge, Matthias},
month = dec,
year = {2014},
pages = {e1003963},
file = {journal.pcbi.1003963.pdf:/Users/tsawallis/Dropbox/Zotero_Library/zotero/storage/FW9VG5V5/journal.pcbi.1003963.pdf:application/pdf}
}
@article{simonyan_very_2015,
title = {Very {{Deep Convolutional Networks}} for {{Large}}-{{Scale Image Recognition}}},
volume = {abs/1409.1556},
timestamp = {2016-05-03T19:52:48Z},
journal = {ICLR},
author = {Simonyan, Karen and Zisserman, Andrew},
year = {2015},
file = {Simonyan_Zisserman_2014_CoRR.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Simonyan_Zisserman_2014_CoRR.pdf:application/pdf}
}
@article{morey_fallacy_2015,
title = {The Fallacy of Placing Confidence in Confidence Intervals},
issn = {1069-9384, 1531-5320},
doi = {10.3758/s13423-015-0947-8},
language = {en},
timestamp = {2015-10-19T09:22:58Z},
urldate = {2015-10-19},
journal = {Psychonomic Bulletin \& Review},
author = {Morey, Richard D. and Hoekstra, Rink and Rouder, Jeffrey N. and Lee, Michael D. and Wagenmakers, Eric-Jan},
month = oct,
year = {2015},
file = {Morey_etal_2015_Psychonomic Bulletin & Review.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Morey_etal_2015_Psychonomic Bulletin & Review.pdf:application/pdf}
}
@article{balas_invariant_2015,
title = {Invariant Texture Perception Is Harder with Synthetic Textures: {{Implications}} for Models of Texture Processing},
volume = {115},
issn = {00426989},
shorttitle = {Invariant Texture Perception Is Harder with Synthetic Textures},
doi = {10.1016/j.visres.2015.01.022},
language = {en},
timestamp = {2016-02-24T10:16:33Z},
urldate = {2015-11-17},
journal = {Vision Research},
author = {Balas, Benjamin J and Conlin, Catherine},
month = oct,
year = {2015},
pages = {271--279},
file = {Balas_Conlin_2015_Vision Research.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Balas_Conlin_2015_Vision Research.pdf:application/pdf}
}
@article{baayen_mixed-effects_2008,
title = {Mixed-Effects Modeling with Crossed Random Effects for Subjects and Items},
volume = {59},
issn = {0749596X},
doi = {10.1016/j.jml.2007.12.005},
language = {en},
timestamp = {2016-05-05T14:32:00Z},
number = {4},
urldate = {2016-05-05},
journal = {Journal of Memory and Language},
author = {Baayen, R.H. and Davidson, D.J. and Bates, D.M.},
month = nov,
year = {2008},
pages = {390--412},
file = {baayenDavidsonBates.pdf:/Users/tsawallis/Dropbox/Zotero_Library/zotero/storage/W43PP36A/baayenDavidsonBates.pdf:application/pdf}
}
@article{morey_continued_2015,
title = {Continued Misinterpretation of Confidence Intervals: Response to {{Miller}} and {{Ulrich}}},
issn = {1069-9384, 1531-5320},
shorttitle = {Continued Misinterpretation of Confidence Intervals},
doi = {10.3758/s13423-015-0955-8},
language = {en},
timestamp = {2016-01-13T09:11:56Z},
urldate = {2016-01-13},
journal = {Psychonomic Bulletin \& Review},
author = {Morey, Richard D. and Hoekstra, Rink and Rouder, Jeffrey N. and Wagenmakers, Eric-Jan},
month = nov,
year = {2015},
file = {Morey_etal_2015_Psychonomic Bulletin & Review3.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Morey_etal_2015_Psychonomic Bulletin & Review3.pdf:application/pdf}
}
@article{miller_interpreting_2015,
title = {Interpreting Confidence Intervals: {{A}} Comment on {{Hoekstra}}, {{Morey}}, {{Rouder}}, and {{Wagenmakers}} (2014)},
issn = {1069-9384, 1531-5320},
shorttitle = {Interpreting Confidence Intervals},
doi = {10.3758/s13423-015-0859-7},
language = {en},
timestamp = {2016-01-13T09:11:58Z},
urldate = {2016-01-13},
journal = {Psychonomic Bulletin \& Review},
author = {Miller, Jeff and Ulrich, Rolf},
month = nov,
year = {2015},
file = {Miller_Ulrich_2015_Psychonomic Bulletin & Review.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Miller_Ulrich_2015_Psychonomic Bulletin & Review.pdf:application/pdf}
}
@inproceedings{heeger_pyramidbased_1995,
address = {New York, NY, USA},
series = {SIGGRAPH '95},
title = {Pyramid-Based {{Texture Analysis}}/{{Synthesis}}},
isbn = {0-89791-701-4},
doi = {10.1145/218380.218446},
timestamp = {2016-02-17T15:38:33Z},
booktitle = {Proceedings of the {{22Nd Annual Conference}} on {{Computer Graphics}} and {{Interactive Techniques}}},
publisher = {{ACM}},
author = {Heeger, David J. and Bergen, James R.},
year = {1995},
pages = {229--238},
file = {Heeger_Bergen_1995.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1995/Heeger_Bergen_1995.pdf:application/pdf}
}
@inproceedings{judd_learning_2009,
address = {Kyoto},
title = {Learning to Predict Where Humans Look},
doi = {10.1109/ICCV.2009.5459462},
timestamp = {2016-02-17T15:49:58Z},
booktitle = {{{IEEE}} 12th {{International Conference}} on {{Computer Vision}}},
author = {Judd, T. and Ehinger, K. and Durand, F. and Torralba, A.},
month = sep,
year = {2009},
keywords = {Application software,Biological system modeling,Biology computing,Computer Graphics,Context modeling,eye tracking data,feature extraction,high-level image features,human computer interaction,Image databases,Layout,Predictive models,saliency approaches,Spatial databases,top-down image semantics,tracking},
pages = {2106--2113},
file = {Judd_etal_2009.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2009/Judd_etal_2009.pdf:application/pdf}
}
@article{movshon_representation_2014,
title = {Representation of {{Naturalistic Image Structure}} in the {{Primate Visual Cortex}}},
volume = {79},
issn = {0091-7451, 1943-4456},
doi = {10.1101/sqb.2014.79.024844},
language = {en},
timestamp = {2016-02-17T15:58:49Z},
urldate = {2016-02-17},
journal = {Cold Spring Harbor Symposia on Quantitative Biology},
author = {Movshon, J. Anthony and Simoncelli, Eero P.},
year = {2014},
pages = {115--122},
file = {Movshon_Simoncelli_2014_Cold Spring Harbor Symposia on Quantitative Biology.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2014/Movshon_Simoncelli_2014_Cold Spring Harbor Symposia on Quantitative Biology.pdf:application/pdf}
}
@article{yamins_using_2016,
title = {Using Goal-Driven Deep Learning Models to Understand Sensory Cortex},
volume = {19},
issn = {1097-6256, 1546-1726},
doi = {10.1038/nn.4244},
timestamp = {2016-02-24T08:54:28Z},
number = {3},
urldate = {2016-02-24},
journal = {Nature Neuroscience},
author = {Yamins, Daniel L K and DiCarlo, James J},
month = feb,
year = {2016},
pages = {356--365},
file = {Yamins_DiCarlo_2016_Nature Neuroscience.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2016/Yamins_DiCarlo_2016_Nature Neuroscience.pdf:application/pdf}
}
@article{barr_random_2013,
title = {Random Effects Structure for Confirmatory Hypothesis Testing: {{Keep}} It Maximal},
volume = {68},
issn = {0749596X},
shorttitle = {Random Effects Structure for Confirmatory Hypothesis Testing},
doi = {10.1016/j.jml.2012.11.001},
language = {en},
timestamp = {2016-02-19T12:10:56Z},
number = {3},
urldate = {2016-02-19},
journal = {Journal of Memory and Language},
author = {Barr, Dale J. and Levy, Roger and Scheepers, Christoph and Tily, Harry J.},
month = apr,
year = {2013},
pages = {255--278},
file = {Barr_etal_2013_Journal of Memory and Language.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2013/Barr_etal_2013_Journal of Memory and Language.pdf:application/pdf;Barr_etal_2013_Journal of Memory and Language.txt:/Users/tsawallis/Dropbox/Zotero_Library/2013/Barr_etal_2013_Journal of Memory and Language.txt:text/plain}
}
@article{keshvari_pooling_2016,
title = {Pooling of Continuous Features Provides a Unifying Account of Crowding},
volume = {16},
issn = {1534-7362},
doi = {10.1167/16.3.39},
language = {en},
timestamp = {2016-03-08T13:54:35Z},
number = {3},
urldate = {2016-03-08},
journal = {Journal of Vision},
author = {Keshvari, Shaiyan and Rosenholtz, Ruth},
month = feb,
year = {2016},
pages = {39},
file = {Keshvari_Rosenholtz_2016_Journal of Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2016/Keshvari_Rosenholtz_2016_Journal of Vision.pdf:application/pdf}
}
@article{bates_fitting_2015,
title = {Fitting Linear Mixed-Effects Models Using {lme4}},
volume = {67},
doi = {10.18637/jss.v067.i01},
timestamp = {2016-04-04T11:07:08Z},
number = {1},
journal = {Journal of Statistical Software},
author = {Bates, Douglas and M{\"a}chler, Martin and Bolker, Ben and Walker, Steve},
year = {2015},
pages = {1--48},
file = {Bates_etal_2015_Journal of Statistical Software.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2015/Bates_etal_2015_Journal of Statistical Software_2.pdf:application/pdf}
}
@inproceedings{safranek_perceptually_1989,
title = {A Perceptually Tuned Sub-Band Image Coder with Image Dependent Quantization and Post-Quantization Data Compression},
booktitle = {International {Conference} on {Acoustics}, {Speech}, and {Signal Processing} ({ICASSP}-89)},
doi = {10.1109/ICASSP.1989.266837},
timestamp = {2016-09-07T11:54:42Z},
urldate = {2016-06-23},
publisher = {{IEEE}},
author = {Safranek, Robert J. and Johnston, J. D.},
year = {1989},
pages = {1945--1948}
}
@article{wallis_testing_2016,
author = {Wallis, Thomas S. A. and Bethge, Matthias and Wichmann, Felix A.},
title = {Testing Models of Peripheral Encoding Using Metamerism in an Oddity Paradigm},
journal = {Journal of Vision},
year = {2016},
month = mar,
volume = {16},
number = {2},
pages = {4},
doi = {10.1167/16.2.4},
issn = {1534-7362},
language = {en},
urldate = {2016-03-14},
timestamp = {2016-03-14T10:22:18Z},
file = {Wallis_etal_2016_Journal of Vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2016/Wallis_etal_2016_Journal of Vision.pdf:application/pdf}
}
@article{vehtari_practical_2016,
title = {Practical {Bayesian} Model Evaluation Using Leave-One-out Cross-Validation and {WAIC}},
eprint = {1507.04544},
eprinttype = {arXiv},
eprintclass = {stat.CO},
timestamp = {2016-09-28T11:21:39Z},
urldate = {2016-03-17},
journal = {arXiv preprint arXiv:1507.04544},
author = {Vehtari, Aki and Gelman, Andrew and Gabry, Jonah},
year = {2016},
file = {Vehtari_etal_2016_arXiv preprint arXiv1507.04544.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2016/Vehtari_etal_2016_arXiv preprint arXiv1507.04544.pdf:application/pdf}
}
@article{kehrer_central_1989,
title = {Central Performance Drop on Perceptual Segregation Tasks},
volume = {4},
timestamp = {2016-10-26T12:06:38Z},
number = {1},
urldate = {2016-10-26},
journal = {Spatial Vision},
author = {Kehrer, Lothar},
year = {1989},
pages = {45--62},
file = {Kehrer_1989_Spatial vision.pdf:/Users/tsawallis/Dropbox/Zotero_Library/1989/Kehrer_1989_Spatial vision.pdf:application/pdf}
}
@misc{buerkner_brms_2016,
title = {{brms}: {Bayesian Regression Models} Using {Stan}},
howpublished = {R package},
timestamp = {2016-06-20T13:12:14Z},
author = {B{\"u}rkner, Paul-Christian},
year = {2016},
file = {Buerkner_2016.pdf:/Users/tsawallis/Dropbox/Zotero_Library/2016/Buerkner_2016.pdf:application/pdf}
}