-
Notifications
You must be signed in to change notification settings - Fork 0
/
tad.bib
10570 lines (9810 loc) · 796 KB
/
tad.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
@book{1994a,
  author    = {Wilf, H. S.},
  title     = {Generatingfunctionology},
  year      = {1994},
  publisher = {{Elsevier}},
  doi       = {10.1016/c2009-0-02369-1},
  file      = {/Users/driscoll/Dropbox/library/Book/Wilf_1994_generatingfunctionology.pdf},
}
@article{1996a,
  author    = {Middendorf, Joan and Kalish, Alan},
  title     = {Table of Contents},
  journal   = {The National Teaching {{\&}} Learning Forum},
  year      = {1996},
  month     = feb,
  volume    = {5},
  number    = {2},
  pages     = {1--12},
  publisher = {{Wiley-Blackwell}},
  doi       = {10.1002/ntlf.10026},
  file      = {/Users/driscoll/Dropbox/library/Journal Article/Middendorf_Kalish_1996_Table of contents.pdf},
}
@book{2000a,
title = {How People Learn},
author = {{National Research Council}},
year = {2000},
month = aug,
publisher = {{National Academies Press}},
doi = {10.17226/9853},
file = {/Users/driscoll/Dropbox/library/Book/2000_How people learn.pdf}
}
@article{Abelson_Breaking_2007,
title = {Breaking down the Blink},
author = {Abelson, Mark B. and Walker, Pamela},
year = {2007},
keywords = {No DOI found}
}
@book{abramowitzHandbookMathematicalFunctions2013,
  editor     = {Abramowitz, Milton and Stegun, Irene A.},
  title      = {Handbook of Mathematical Functions: With Formulas, Graphs, and Mathematical Tables},
  shorttitle = {Handbook of Mathematical Functions},
  year       = {2013},
  series     = {Dover Books on Mathematics},
  edition    = {9. Dover print.; [Nachdr. der Ausg. von 1972]},
  publisher  = {{Dover Publ}},
  address    = {{New York, NY}},
  isbn       = {978-0-486-61272-0},
  langid     = {english},
  annotation = {OCLC: 935935300},
}
@misc{AdcockAdaptiveSampling2022,
title = {An {{Adaptive}} Sampling and Domain Learning Strategy for Multivariate Function Approximation on Unknown Domains},
author = {Adcock, Ben and Cardenas, Juan M. and Dexter, Nick},
year = {2022},
number = {arXiv:2202.00144},
eprint = {2202.00144},
primaryclass = {math.NA},
publisher = {{arXiv}},
doi = {10.48550/ARXIV.2202.00144},
urldate = {2024-02-26},
abstract = {Many problems in computational science and engineering can be described in terms of approximating a smooth function of \$d\$ variables, defined over an unknown domain of interest \${\textohm}{\textbackslash}subset {\textbackslash}mathbb\{R\}\^{}d\$, from sample data. Here both the curse of dimensionality (\$d{\textbackslash}gg 1\$) and the lack of domain knowledge with \${\textohm}\$ potentially irregular and/or disconnected are confounding factors for sampling-based methods. Na\"{\i}ve approaches often lead to wasted samples and inefficient approximation schemes. For example, uniform sampling can result in upwards of 20{\textbackslash}\% wasted samples in some problems. In surrogate model construction in computational uncertainty quantification (UQ), the high cost of computing samples needs a more efficient sampling procedure. In the last years, methods for computing such approximations from sample data have been studied in the case of irregular domains. The advantages of computing sampling measures depending on an approximation space \$P\$ of \${\textbackslash}dim(P)=N\$ have been shown. In particular, such methods confer advantages such as stability and well-conditioning, with \${\textbackslash}mathcal\{O\}(N{\textbackslash}log(N))\$ as sample complexity. The recently-proposed adaptive sampling for general domains (ASGD) strategy is one method to construct these sampling measures. The main contribution of this paper is to improve ASGD by adaptively updating the sampling measures over unknown domains. We achieve this by first introducing a general domain adaptivity strategy (GDAS), which approximates the function and domain of interest from sample points. Second, we propose adaptive sampling for unknown domains (ASUD), which generates sampling measures over a domain that may not be known in advance. 
Our results show that the ASUD approach consistently achieves the same or smaller errors as uniform sampling, but using fewer, and often significantly fewer evaluations.},
copyright = {arXiv.org perpetual, non-exclusive license},
archiveprefix = {arxiv},
keywords = {FOS: Mathematics,Numerical Analysis (math.NA)}
}
@article{AdcockAdaptiveSampling2023,
title = {An {{Adaptive Sampling}} and {{Domain Learning Strategy}} for {{Multivariate Function Approximation}} on {{Unknown Domains}}},
author = {Adcock, Ben and Cardenas, Juan M. and Dexter, Nick},
year = {2023},
month = feb,
journal = {SIAM Journal on Scientific Computing},
volume = {45},
number = {1},
pages = {A200--A225},
publisher = {{Society for Industrial and Applied Mathematics}},
issn = {1064-8275},
doi = {10.1137/22M1472693},
urldate = {2023-04-26},
abstract = {In this paper, we address the problem of approximating a multivariate function defined on a general domain in \$d\$ dimensions from sample points. We consider weighted least-squares approximation in an arbitrary finite-dimensional space \$P\$ from independent random samples taken according to a suitable measure. In general, least-squares approximations can be inaccurate and ill-conditioned when the number of sample points \$M\$ is close to \$N = {\textbackslash}dim(P)\$. To counteract this, we introduce a novel method for sampling in general domains which leads to provably accurate and well-conditioned approximations. The resulting sampling measure is discrete and therefore straightforward to sample from. Our main result shows near-optimal sample complexity for this procedure; specifically, \$M = {\textbackslash}mathcal\{O\}(N {\textbackslash}log(N))\$ samples suffice for a well-conditioned and accurate approximation. Numerical experiments on polynomial approximation in general domains confirm the benefits of this method over standard sampling.},
file = {/Users/driscoll/Dropbox/library/Journal Article/Adcock et al_2023_An Adaptive Sampling and Domain Learning Strategy for Multivariate Function.pdf}
}
@misc{adcockCompressedSensingApproaches2017,
  author        = {Adcock, Ben and Brugiapaglia, Simone and Webster, Clayton G.},
  title         = {Compressed Sensing Approaches for Polynomial Approximation of High-Dimensional Functions},
  year          = {2017},
  month         = jun,
  number        = {arXiv:1703.06987},
  eprint        = {1703.06987},
  primaryclass  = {math},
  publisher     = {{arXiv}},
  urldate       = {2022-07-11},
  abstract      = {In recent years, the use of sparse recovery techniques in the approximation of high-dimensional functions has garnered increasing interest. In this work we present a survey of recent progress in this emerging topic. Our main focus is on the computation of polynomial approximations of high-dimensional functions on \$d\$-dimensional hypercubes. We show that smooth, multivariate functions possess expansions in orthogonal polynomial bases that are not only approximately sparse, but possess a particular type of structured sparsity defined by so-called lower sets. This structure can be exploited via the use of weighted \${\textbackslash}ell\^{}1\$ minimization techniques, and, as we demonstrate, doing so leads to sample complexity estimates that are at most logarithmically dependent on the dimension \$d\$. Hence the curse of dimensionality - the bane of high-dimensional approximation - is mitigated to a significant extent. We also discuss several practical issues, including unknown noise (due to truncation or numerical error), and highlight a number of open problems and challenges.},
  archiveprefix = {arxiv},
  keywords      = {Mathematics - Numerical Analysis},
  file          = {/Users/driscoll/Dropbox/library/Preprint/Adcock et al-2017-Compressed sensing approaches for polynomial approximation of high-dimensional.pdf;/Users/driscoll/Zotero/storage/N3N5V9YN/1703.html},
}
@article{adcockGapTheoryPractice2021,
  author  = {Adcock, Ben and Dexter, Nick},
  title   = {The {{Gap}} between {{Theory}} and {{Practice}} in {{Function Approximation}} with {{Deep Neural Networks}}},
  journal = {SIAM Journal on Mathematics of Data Science},
  year    = {2021},
  month   = jan,
  volume  = {3},
  number  = {2},
  pages   = {624--655},
  issn    = {2577-0187},
  doi     = {10.1137/20m131309x},
  urldate = {2021-06-14},
  langid  = {english},
  file    = {/Users/driscoll/Dropbox/library/Journal Article/Adcock_Dexter_2021_The Gap between Theory and Practice in Function Approximation with Deep Neural.pdf},
}
@misc{adcockMonteCarloBad2022,
  author        = {Adcock, Ben and Brugiapaglia, Simone},
  title         = {Is {{Monte Carlo}} a Bad Sampling Strategy for Learning Smooth Functions in High Dimensions?},
  year          = {2022},
  month         = aug,
  number        = {arXiv:2208.09045},
  eprint        = {2208.09045},
  primaryclass  = {cs, math},
  publisher     = {{arXiv}},
  urldate       = {2022-08-31},
  abstract      = {This paper concerns the approximation of smooth, high-dimensional functions from limited samples using polynomials. This task lies at the heart of many applications in computational science and engineering -- notably, those arising from parametric modelling and uncertainty quantification. It is common to use Monte Carlo (MC) sampling in such applications, so as not to succumb to the curse of dimensionality. However, it is well known this strategy is theoretically suboptimal. There are many polynomial spaces of dimension \$n\$ for which the sample complexity scales log-quadratically in \$n\$. This well-documented phenomenon has led to a concerted effort to design improved, in fact, near-optimal strategies, whose sample complexities scale log-linearly, or even linearly in \$n\$. Paradoxically, in this work we show that MC is actually a perfectly good strategy in high dimensions. We first document this phenomenon via several numerical examples. Next, we present a theoretical analysis that resolves this paradox for holomorphic functions of infinitely-many variables. We show that there is a least-squares scheme based on \$m\$ MC samples whose error decays algebraically fast in \$m/{\textbackslash}log(m)\$, with a rate that is the same as that of the best \$n\$-term polynomial approximation. This result is non-constructive, since it assumes knowledge of a suitable polynomial space in which to perform the approximation. We next present a compressed sensing-based scheme that achieves the same rate, except for a larger polylogarithmic factor. This scheme is practical, and numerically it performs as well as or better than well-known adaptive least-squares schemes. Overall, our findings demonstrate that MC sampling is eminently suitable for smooth function approximation when the dimension is sufficiently high. Hence the benefits of improved sampling strategies are generically limited to lower-dimensional settings.},
  archiveprefix = {arxiv},
  keywords      = {Computer Science - Machine Learning,Mathematics - Functional Analysis,Mathematics - Numerical Analysis},
  file          = {/Users/driscoll/Dropbox/library/Preprint/Adcock_Brugiapaglia_2022_Is Monte Carlo a bad sampling strategy for learning smooth functions in high.pdf;/Users/driscoll/Zotero/storage/AIBT7DJW/2208.html},
}
@article{adcockNearOptimalSamplingStrategies2020,
  author  = {Adcock, Ben and Cardenas, Juan M.},
  title   = {Near-{{Optimal Sampling Strategies}} for {{Multivariate Function Approximation}} on {{General Domains}}},
  journal = {SIAM Journal on Mathematics of Data Science},
  year    = {2020},
  month   = jan,
  volume  = {2},
  number  = {3},
  pages   = {607--630},
  issn    = {2577-0187},
  doi     = {10.1137/19M1279459},
  urldate = {2022-07-11},
  langid  = {english},
  file    = {/Users/driscoll/Dropbox/library/Journal Article/Adcock_Cardenas-2020-Near-Optimal Sampling Strategies for Multivariate Function Approximation on.pdf},
}
@book{adcockSparsePolynomialApproximation2021,
  author    = {Adcock, Ben and Brugiapaglia, Simone and Webster, Clayton G.},
  title     = {Sparse Polynomial Approximation of High-Dimensional Functions},
  year      = {2021},
  series    = {Computational Science and Engineering},
  number    = {25},
  publisher = {{Society for Industrial and Applied Mathematics}},
  address   = {{Philadelphia}},
  abstract  = {"This is a book about polynomial approximation in high dimensions"--},
  isbn      = {978-1-61197-687-8},
  lccn      = {QA221 .A33 2021},
  keywords  = {Approximation theory},
}
@article{AitonAdaptivePartition2018,
title = {An Adaptive Partition of Unity Method for {{Chebyshev}} Polynomial Interpolation},
author = {Aiton, Kevin W. and Driscoll, Tobin A.},
year = {2018},
journal = {SIAM Journal on Scientific Computing},
volume = {40},
number = {1},
pages = {A251--A265},
doi = {10.1137/17m112052x},
copyright = {All rights reserved},
file = {/Users/driscoll/Dropbox/library/Journal Article/Aiton_Driscoll_2019_An Adaptive Partition of Unity Method for Multivariate Chebyshev Polynomial.pdf}
}
@article{AitonAdaptivePartition2019,
title = {An {{Adaptive Partition}} of {{Unity Method}} for {{Multivariate Chebyshev Polynomial Approximations}}},
author = {Aiton, Kevin W. and Driscoll, Tobin A.},
year = {2019},
month = jan,
journal = {SIAM Journal on Scientific Computing},
volume = {41},
number = {5},
pages = {A3230--A3245},
issn = {1064-8275, 1095-7197},
doi = {10.1137/18m1184904},
urldate = {2022-02-11},
copyright = {All rights reserved},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Aiton_Driscoll_2019_An Adaptive Partition of Unity Method for Multivariate Chebyshev Polynomial2.pdf}
}
@article{AitonPreconditionedNonlinear2020,
title = {Preconditioned {{Nonlinear Iterations}} for {{Overlapping Chebyshev Discretizations}} with {{Independent Grids}}},
author = {Aiton, Kevin W. and Driscoll, Tobin A.},
year = {2020},
month = jan,
journal = {SIAM Journal on Scientific Computing},
volume = {42},
number = {4},
pages = {A2360--A2370},
publisher = {{Society for Industrial and Applied Mathematics}},
issn = {1064-8275},
doi = {10.1137/19m1242483},
urldate = {2021-09-09},
abstract = {The additive Schwarz method is usually presented as a preconditioner for a PDE linearization based on overlapping subsets of nodes from a global discretization. It has previously been shown how to apply Schwarz preconditioning to a nonlinear problem. By first replacing the original global PDE with the Schwarz overlapping problem, the global discretization becomes a simple union of subdomain discretizations, and unknowns do not need to be shared. In this way, restrictive-type updates can be avoided, and subdomains need to communicate only via interface interpolations. The resulting preconditioner can be applied linearly or nonlinearly. In the latter case, nonlinear subdomain problems are solved independently in parallel, and the frequency and amount of interprocess communication can be greatly reduced compared to global preconditioning of the sequence of linearized problems.},
copyright = {All rights reserved},
keywords = {33F05,65N55,97N40,additive Schwarz,domain decomposition,partition of unity,polynomial interpolation},
file = {/Users/driscoll/Dropbox/library/Journal Article/Aiton_Driscoll-2020-Preconditioned Nonlinear Iterations for Overlapping Chebyshev Discretizations.pdf;/Users/driscoll/Dropbox/library/Journal Article/Aiton_Driscoll-2020-Preconditioned Nonlinear Iterations for Overlapping Chebyshev Discretizations2.pdf}
}
@inproceedings{alastrueyArterialPulseWave2012,
title = {Arterial Pulse Wave Haemodynamics},
booktitle = {11th {{International Conference}} on {{Pressure Surges}}},
author = {Alastruey, Jordi and Parker, Kim H and Sherwin, Spencer J and others},
year = {2012},
pages = {401--442},
publisher = {{Virtual PiE Led t/a BHR Group}},
address = {{Lisbon, Portugal}},
keywords = {No DOI found},
file = {/Users/driscoll/Zotero/storage/JHA5QIAW/Alastruey et al. - 2012 - Arterial pulse wave haemodynamics.pdf}
}
@article{Albin2011,
  author    = {Albin, Nathan and Bruno, Oscar P.},
  title     = {A Spectral {{FC}} Solver for the Compressible {{Navier}}--{{Stokes}} Equations in General Domains {{I}}: {{Explicit}} Time-Stepping},
  journal   = {Journal of Computational Physics},
  year      = {2011},
  month     = jul,
  volume    = {230},
  number    = {16},
  pages     = {6248--6270},
  publisher = {{Elsevier BV}},
  doi       = {10.1016/j.jcp.2011.04.023},
  file      = {/Users/driscoll/Dropbox/library/Journal Article/Albin_Bruno_2011_A spectral FC solver for the compressible Navier–Stokes equations in general.pdf},
}
@article{AlHassaniehLocalCompatibility2022,
title = {Local {{Compatibility Boundary Conditions}} for {{High-Order Accurate Finite-Difference Approximations}} of {{PDEs}}},
author = {Al Hassanieh, Nour G. and Banks, Jeffrey W. and Henshaw, William D. and Schwendeman, Donald W.},
year = {2022},
month = dec,
journal = {SIAM Journal on Scientific Computing},
volume = {44},
number = {6},
pages = {A3645--A3672},
publisher = {{Society for Industrial and Applied Mathematics}},
issn = {1064-8275},
doi = {10.1137/21M1458454},
urldate = {2023-01-09},
abstract = {We describe a new approach to derive numerical approximations of boundary conditions for high-order accurate finite-difference approximations. The approach, called the local compatibility boundary condition (LCBC) method, uses boundary conditions and compatibility boundary conditions derived from the governing equations, as well as interior and boundary grid values, to construct a local polynomial, whose degree matches the order of accuracy of the interior scheme, centered at each boundary point. The local polynomial is then used to derive a discrete formula for each ghost point in terms of the data. This approach leads to centered approximations that are generally more accurate and stable than one-sided approximations. Moreover, the stencil approximations are local since they do not couple to neighboring ghost-point values, which can occur with traditional compatibility conditions. The local polynomial is derived using continuous operators and derivatives, which enables the automatic construction of stencil approximations at different orders of accuracy. The LCBC method is developed here for problems governed by second-order partial differential equations, and it is verified in two space dimensions for schemes up to sixth-order accuracy.},
keywords = {65M06,65M12,65M20,65M22,boundary conditions,compatibility conditions,heat equation,high-order finite-differences,wave equation},
file = {/Users/driscoll/Dropbox/library/Journal Article/Al Hassanieh et al_2022_Local Compatibility Boundary Conditions for High-Order Accurate.pdf}
}
@article{Ali_Industrial_2006,
title = {Industrial Perspective in Ocular Drug Delivery},
author = {Ali, Yusuf and Lehmussaari, Kari},
year = {2006},
journal = {Advanced Drug Delivery Reviews},
volume = {58},
number = {11},
pages = {1258--1268},
issn = {0169-409X},
doi = {10.1016/j.addr.2006.07.022},
abstract = {In the development of a commercial drug product, the formulator must consider various perspectives. The bioavailability of the active drug substance is often the major hurdle to overcome. In the past it has been common to add viscosity-enhancing agents or mucoadhesive polymers into formulations to improve ocular bioavailability. In addition to these conventional approaches, non-conventional technologies such as nanotechnology, microspheres and prodrugs could be considered to optimize the product.Along with bioavailability, the formulator must also consider the tolerability and stability of the final drug product. Quite often, the final formulation is the ideal compromise between the three.Authorities in different parts of the world have set strict requirements and guidelines for development and approval of drug products. In order to secure an expeditious development process and the shortest possible review and approval time, the formulator should be familiar with the current requirements and regulations.},
pmid = {17079049}
}
@article{alqahtaniSolutionIllposedProblems2022,
  author   = {Alqahtani, A. and Mach, T. and Reichel, L.},
  title    = {Solution of Ill-Posed Problems with {{Chebfun}}},
  journal  = {Numerical Algorithms},
  year     = {2022},
  month    = sep,
  issn     = {1572-9265},
  doi      = {10.1007/s11075-022-01390-z},
  urldate  = {2022-09-21},
  abstract = {The analysis of linear ill-posed problems often is carried out in function spaces using tools from functional analysis. However, the numerical solution of these problems typically is computed by first discretizing the problem and then applying tools from finite-dimensional linear algebra. The present paper explores the feasibility of applying the Chebfun package to solve ill-posed problems with a regularize-first approach numerically. This allows a user to work with functions instead of vectors and with integral operators instead of matrices. The solution process therefore is much closer to the analysis of ill-posed problems than standard linear algebra-based solution methods. Furthermore, the difficult process of explicitly choosing a suitable discretization is not required.},
  langid   = {english},
  keywords = {41A10,45B05,47A52,65F22,Chebfun,Ill-posed problem,Inverse problem,Tikhonov regularization,Truncated SVE},
  file     = {/Users/driscoll/Dropbox/library/Journal Article/Alqahtani et al_2022_Solution of ill-posed problems with Chebfun.pdf},
}
@article{amparoEvaluatingCorneal2017,
title = {Evaluating {{Corneal Fluorescein Staining Using}} a {{Novel Automated Method}}},
author = {Amparo, Francisco and Wang, Haobing and Yin, Jia and Marmalidou, Anna and Dana, Reza},
year = {2017},
month = may,
journal = {Investigative Ophthalmology \& Visual Science},
volume = {58},
number = {6},
pages = {BIO168--BIO173},
issn = {1552-5783},
doi = {10.1167/iovs.17-21831},
abstract = {Purpose: To evaluate interobserver concordance in measured corneal fluorescein staining (CFS) using the National Eye Institute/Industry (NEI) grading scale and the Corneal Fluorescein Staining Index (CFSi), a computer-assisted, objective, centesimal scoring system. Methods: We conducted a study to evaluate CFS in clinical photographs of patients with corneal epitheliopathy. One group of clinicians graded CFS in the images using the NEI while a second group applied the CFSi. We evaluated the level of interobserver agreement and differences among CFS scores with each method, level of correlation between the two methods, and distribution of cases based on the CFS severity assigned by each method. Results: The level of interobserver agreement was 0.65 (P {$<$} 0.001) with the NEI, and 0.99 (P {$<$} 0.001) with the CFSi. There were statistically significant differences among clinicians' measurements obtained with the NEI (P {$<$} 0.001), but not with the CFSi (P = 0.78). There was a statistically significant correlation between the CFS scores obtained with the two methods (R = 0.72; P {$<$} 0.001). The NEI scale allocated the majority of cases (65\%) within the higher quartile in the scale's severity (12-15/15). In contrast, the CFSi allocated the majority of cases (61\%) within the lower quartile in the scale's severity (0-25/100). Conclusions: The CFSi is easy to implement, provides higher interobserver consistency, and due to its continuous score can discriminate smaller differences in CFS. Reproducibility of the computer-based system is higher and, interestingly, the system allocates cases of epitheliopathy in different severity categories than clinicians do. The CFSi can be an alternative for objective CFS evaluation in the clinic and in clinical trials.},
langid = {english},
pmid = {28693042},
keywords = {Analysis of Variance,Corneal Diseases,Diagnostic Techniques Ophthalmological,Dry Eye Syndromes,Epithelium Corneal,Fluorescein,Fluorescent Dyes,Humans,Image Processing Computer-Assisted,Observer Variation,Prospective Studies,Reproducibility of Results},
file = {/Users/driscoll/Dropbox/library/Journal Article/Amparo et al-2017-Evaluating Corneal Fluorescein Staining Using a Novel.pdf}
}
@article{Anderson2010,
  author    = {Anderson, Christopher R.},
  title     = {A {{Rayleigh}}--{{Chebyshev}} Procedure for Finding the Smallest Eigenvalues and Associated Eigenvectors of Large Sparse {{Hermitian}} Matrices},
  journal   = {Journal of Computational Physics},
  year      = {2010},
  month     = sep,
  volume    = {229},
  number    = {19},
  pages     = {7477--7487},
  publisher = {{Elsevier BV}},
  doi       = {10.1016/j.jcp.2010.06.030},
  file      = {/Users/driscoll/Dropbox/library/Journal Article/Anderson_2010_A Rayleigh–Chebyshev procedure for finding the smallest eigenvalues and.pdf},
}
@article{ankerstOPTICSOrderingPoints1999,
  author     = {Ankerst, Mihael and Breunig, Markus M. and Kriegel, Hans-Peter and Sander, J{\"o}rg},
  title      = {{{OPTICS}}: Ordering Points to Identify the Clustering Structure},
  shorttitle = {{{OPTICS}}},
  journal    = {ACM SIGMOD Record},
  year       = {1999},
  month      = jun,
  volume     = {28},
  number     = {2},
  pages      = {49--60},
  issn       = {0163-5808},
  doi        = {10.1145/304181.304187},
  urldate    = {2022-04-09},
  abstract   = {Cluster analysis is a primary method for database mining. It is either used as a stand-alone tool to get insight into the distribution of a data set, e.g. to focus further analysis and data processing, or as a preprocessing step for other algorithms operating on the detected clusters. Almost all of the well-known clustering algorithms require input parameters which are hard to determine but have a significant influence on the clustering result. Furthermore, for many real-data sets there does not even exist a global parameter setting for which the result of the clustering algorithm describes the intrinsic clustering structure accurately. We introduce a new algorithm for the purpose of cluster analysis which does not produce a clustering of a data set explicitly; but instead creates an augmented ordering of the database representing its density-based clustering structure. This cluster-ordering contains information which is equivalent to the density-based clusterings corresponding to a broad range of parameter settings. It is a versatile basis for both automatic and interactive cluster analysis. We show how to automatically and efficiently extract not only 'traditional' clustering information (e.g. representative points, arbitrary shaped clusters), but also the intrinsic clustering structure. For medium sized data sets, the cluster-ordering can be represented graphically and for very large data sets, we introduce an appropriate visualization technique. Both are suitable for interactive exploration of the intrinsic clustering structure offering additional insights into the distribution and correlation of the data.},
  keywords   = {cluster analysis,database mining,visualization},
  file       = {/Users/driscoll/Dropbox/library/Journal Article/Ankerst et al-1999-OPTICS.pdf},
}
@article{anthimopoulosLungPatternClassification2016,
  author   = {Anthimopoulos, Marios and Christodoulidis, Stergios and Ebner, Lukas and Christe, Andreas and Mougiakakou, Stavroula},
  title    = {Lung {{Pattern Classification}} for {{Interstitial Lung Diseases Using}} a {{Deep Convolutional Neural Network}}},
  journal  = {IEEE Transactions on Medical Imaging},
  year     = {2016},
  month    = may,
  volume   = {35},
  number   = {5},
  pages    = {1207--1216},
  issn     = {1558-254X},
  doi      = {10.1109/tmi.2016.2535865},
  abstract = {Automated tissue characterization is one of the most crucial components of a computer aided diagnosis (CAD) system for interstitial lung diseases (ILDs). Although much research has been conducted in this field, the problem remains challenging. Deep learning techniques have recently achieved impressive results in a variety of computer vision problems, raising expectations that they might be applied in other domains, such as medical image analysis. In this paper, we propose and evaluate a convolutional neural network (CNN), designed for the classification of ILD patterns. The proposed network consists of 5 convolutional layers with 2 {\texttimes} 2 kernels and LeakyReLU activations, followed by average pooling with size equal to the size of the final feature maps and three dense layers. The last dense layer has 7 outputs, equivalent to the classes considered: healthy, ground glass opacity (GGO), micronodules, consolidation, reticulation, honeycombing and a combination of GGO/reticulation. To train and evaluate the CNN, we used a dataset of 14696 image patches, derived by 120 CT scans from different scanners and hospitals. To the best of our knowledge, this is the first deep CNN designed for the specific problem. A comparative analysis proved the effectiveness of the proposed CNN against previous methods in a challenging dataset. The classification performance ( 85.5\%) demonstrated the potential of CNNs in analyzing lung patterns. Future work includes, extending the CNN to three-dimensional data provided by CT volume scans and integrating the proposed method into a CAD system that aims to provide differential diagnosis for ILDs as a supportive tool for radiologists.},
  keywords = {Algorithms,automated tissue characterization,biological tissues,Computed tomography,computer aided diagnosis system,computer vision problems,computerised tomography,consolidation,convolution,Convolution,Convolutional neural networks,CT volume scans,deep convolutional neural network,deep learning techniques,Design automation,diseases,Diseases,feature extraction,Feature extraction,feature maps,ground glass opacity,honeycombing,Humans,ILD pattern classification,image classification,Image Interpretation Computer-Assisted,interstitial lung diseases,learning (artificial intelligence),lung,Lung,Lung Diseases Interstitial,lung pattern classification,Lungs,medical image analysis,medical image processing,micronodules,neural nets,Neural networks,Neural Networks (Computer),reticulation,texture classification,Tomography X-Ray Computed},
  file     = {/Users/driscoll/Dropbox/library/Journal Article/Anthimopoulos et al_2016_Lung Pattern Classification for Interstitial Lung Diseases Using a Deep.pdf;/Users/driscoll/Zotero/storage/5J3MCY8X/7422082.html},
}
@book{AntoulasInterpolatoryMethods2020,
title = {Interpolatory {{Methods}} for {{Model Reduction}}},
author = {Antoulas, A. C. and Beattie, C. A. and G{\"u}{\u g}ercin, S.},
year = {2020},
month = jan,
publisher = {{Society for Industrial and Applied Mathematics}},
address = {{Philadelphia, PA}},
doi = {10.1137/1.9781611976083},
urldate = {2023-04-12},
isbn = {978-1-61197-607-6 978-1-61197-608-3},
langid = {english}
}
@article{Ar_valo_2002,
title = {A Collocation Formulation of Multistep Methods for Variable Step-Size Extensions},
author = {Ar{\'e}valo, Carmen and F{\"u}hrer, Claus and Selva, M{\'o}nica},
year = {2002},
month = aug,
journal = {Applied Numerical Mathematics},
volume = {42},
number = {1-3},
pages = {5--16},
publisher = {{Elsevier BV}},
doi = {10.1016/s0168-9274(01)00138-6},
keywords = {Collocation,Multistep methods,Ordinary differential equations (ODEs),Variable step-size formulas},
file = {/Users/driscoll/Dropbox/library/Journal Article/Arévalo et al_2002_A collocation formulation of multistep methods for variable step-size extensions.pdf}
}
@article{arridgeSolvingInverseProblems2019,
title = {Solving Inverse Problems Using Data-Driven Models},
author = {Arridge, Simon and Maass, Peter and {\"O}ktem, Ozan and Sch{\"o}nlieb, Carola-Bibiane},
year = {2019},
month = may,
journal = {Acta Numerica},
volume = {28},
pages = {1--174},
publisher = {{Cambridge University Press}},
issn = {0962-4929, 1474-0508},
doi = {10.1017/S0962492919000059},
urldate = {2022-12-06},
abstract = {Recent research in inverse problems seeks to develop a mathematically coherent foundation for combining data-driven models, and in particular those based on deep learning, with domain-specific knowledge contained in physical--analytical models. The focus is on solving ill-posed inverse problems that are at the core of many challenging applications in the natural sciences, medicine and life sciences, as well as in engineering and industrial applications. This survey paper aims to give an account of some of the main contributions in data-driven inverse problems.},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Arridge et al_2019_Solving inverse problems using data-driven models.pdf;/Users/driscoll/Zotero/storage/A28TVPS3/CE5B3725869AEAF46E04874115B0AB15.html}
}
@book{ascherComputerMethodsOrdinary1998,
title = {Computer Methods for Ordinary Differential Equations and Differential-Algebraic Equations},
author = {Ascher, U. M. and Petzold, Linda Ruth},
year = {1998},
publisher = {{Society for Industrial and Applied Mathematics}},
address = {{Philadelphia}},
isbn = {978-0-89871-412-8},
lccn = {QA372 .A78 1998},
keywords = {Data processing,Differential equations,Differential-algebraic equations}
}
@book{ascherNumericalSolutionBoundary1995,
title = {Numerical Solution of Boundary Value Problems for Ordinary Differential Equations},
author = {Ascher, U. M. and Mattheij, Robert M. M. and Russell, R. D.},
year = {1995},
series = {Classics in Applied Mathematics},
number = {13},
publisher = {{Society for Industrial and Applied Mathematics}},
address = {{Philadelphia}},
isbn = {978-0-89871-354-1},
lccn = {QA379 .A83 1995},
keywords = {Boundary value problems,Numerical solutions}
}
@article{atkinsonComputingLeastTrimmed1999,
title = {Computing Least Trimmed Squares Regression with the Forward Search},
author = {Atkinson, A. C. and Cheng, T.-C.},
year = {1999},
month = nov,
journal = {Statistics and Computing},
volume = {9},
number = {4},
pages = {251--263},
issn = {1573-1375},
doi = {10.1023/a:1008942604045},
urldate = {2020-11-06},
abstract = {Least trimmed squares (LTS) provides a parametric family of high breakdown estimators in regression with better asymptotic properties than least median of squares (LMS) estimators. We adapt the forward search algorithm of Atkinson (1994) to LTS and provide methods for determining the amount of data to be trimmed. We examine the efficiency of different trimming proportions by simulation and demonstrate the increasing efficiency of parameter estimation as larger proportions of data are fitted using the LTS criterion. Some standard data examples are analysed. One shows that LTS provides more stable solutions than LMS.},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Atkinson_Cheng_1999_Computing least trimmed squares regression with the forward search.pdf}
}
@book{atkinsonElementaryNumericalAnalysis2004,
title = {Elementary Numerical Analysis},
author = {Atkinson, Kendall E. and Han, Weimin},
year = {2004},
edition = {Third},
publisher = {{J. Wiley \& Sons}},
address = {{Hoboken, NJ}},
isbn = {978-0-471-43337-8},
lccn = {QA297 .A83 2004},
keywords = {Numerical analysis}
}
@book{atkinsonIntroductionNumericalAnalysis1989,
title = {An Introduction to Numerical Analysis},
author = {Atkinson, Kendall E.},
year = {1989},
edition = {Second},
publisher = {{Wiley}},
address = {{New York}},
isbn = {978-0-471-62489-9},
lccn = {QA297 .A84 1989},
keywords = {Numerical analysis}
}
@article{aujolOptimalConvergenceRates2019,
title = {Optimal {{Convergence Rates}} for {{Nesterov Acceleration}}},
author = {Aujol, Jean-Francois and Dossal, Charles and Rondepierre, Aude},
year = {2019},
month = jan,
journal = {SIAM Journal on Optimization},
volume = {29},
number = {4},
pages = {3131--3153},
publisher = {{Society for Industrial and Applied Mathematics}},
issn = {1052-6234},
doi = {10.1137/18M1186757},
urldate = {2022-02-25},
abstract = {In this paper, we study the behavior of solutions of the ODE associated to Nesterov acceleration. It is well-known since the pioneering work of Nesterov that the rate of convergence \$O(1/t\^{}2)\$ is optimal for the class of convex functions with Lipschitz gradient. In this work, we show that better convergence rates can be obtained with some additional geometrical conditions, such as {\L} ojasiewicz property. More precisely, we prove the optimal convergence rates that can be obtained depending on the geometry of the function \$F\$ to minimize. The convergence rates are new, and they shed new light on the behavior of Nesterov acceleration schemes. We prove in particular that the classical Nesterov scheme may provide convergence rates that are worse than the classical gradient descent scheme on sharp functions: for instance, the convergence rate for strongly convex functions is not geometric for the classical Nesterov scheme (while it is the case for the gradient descent algorithm). This shows that applying the classical Nesterov acceleration on convex functions without looking more at the geometrical properties of the objective functions may lead to suboptimal algorithms.},
keywords = {34D05,65K05,65K10,90C25,90C30,Lojasiewicz property,Lyapunov functions,ODEs,optimization,rate of convergence},
file = {/Users/driscoll/Dropbox/library/Journal Article/Aujol et al_2019_Optimal Convergence Rates for Nesterov Acceleration.pdf}
}
@article{aurentzFastBackwardStable2015,
title = {Fast and {{Backward Stable Computation}} of {{Roots}} of {{Polynomials}}},
author = {Aurentz, Jared L. and Mach, Thomas and Vandebril, Raf and Watkins, David S.},
year = {2015},
month = jan,
journal = {SIAM Journal on Matrix Analysis and Applications},
volume = {36},
number = {3},
pages = {942--973},
issn = {0895-4798, 1095-7162},
doi = {10.1137/140983434},
urldate = {2020-06-18},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Aurentz et al-2015-Fast and Backward Stable Computation of Roots of Polynomials.pdf}
}
@article{avanzoliniCADCSSimulationClosedloop1988,
title = {{{CADCS}} Simulation of the Closed-Loop Cardiovascular System},
author = {Avanzolini, Guido and Barbini, Paolo and Cappello, Angelo and Cevenini, Gabriele},
year = {1988},
month = jan,
journal = {International Journal of Bio-Medical Computing},
volume = {22},
number = {1},
pages = {39--49},
issn = {00207101},
doi = {10.1016/0020-7101(88)90006-2},
urldate = {2019-12-06},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Avanzolini et al_1988_CADCS simulation of the closed-loop cardiovascular system.pdf}
}
@article{avanzoliniTimeVaryingMechanicalProperties1985,
title = {Time-{{Varying Mechanical Properties}} of the {{Left Ventricle-A Computer Simulation}}},
author = {Avanzolini, G. and Barbini, Paolo and Cappello, A. and Cevese, A.},
year = {1985},
month = oct,
journal = {IEEE Transactions on Biomedical Engineering},
volume = {BME-32},
number = {10},
pages = {756--763},
issn = {0018-9294},
doi = {10.1109/TBME.1985.325490},
abstract = {A numerical model of left ventricular (LV) pump function, incorporating cardiac muscle mechanics and LV geometry, was used to derive a simple linear model of local LV contractile properties. This simplified model views the ventricle as a pressure generator (related to isovolumic contraction) coupled with two time-varying elements: 1) a viscous term (related to the dissipative properties of the myocardium), and 2) an elastic term (related to the tension-length curve of activated fiber and to LV geometry).},
keywords = {HLHS},
file = {/Users/driscoll/Dropbox/library/Journal Article/Avanzolini et al_1985_Time-Varying Mechanical Properties of the Left Ventricle-A Computer Simulation.pdf}
}
@article{Awisi-GyauChangesCorneal2019,
title = {Changes in {{Corneal Detection Thresholds After Repeated Tear Film Instability}}},
author = {{Awisi-Gyau}, Deborah and Begley, Carolyn G. and Situ, Ping and Simpson, Trefford L.},
year = {2019},
month = oct,
journal = {Investigative Ophthalmology \& Visual Science},
volume = {60},
number = {13},
pages = {4234--4240},
issn = {1552-5783},
doi = {10.1167/iovs.19-27802},
urldate = {2022-08-05},
abstract = {To use a human-based model to study the effects of repeated tear film instability on corneal detection thresholds to cold, mechanical, and chemical stimuli. Twenty-five subjects participated in three study visits. A computer-controlled Belmonte esthesiometer was used to estimate corneal detection thresholds to cold, mechanical, and chemical stimuli before, after, and 30 minutes following 10 consecutive sustained tear exposure (STARE) trials. Subjects turned a pain knob (0--10) to indicate discomfort during STARE trials. The area of tear breakup and thinning in each trial was analyzed. Symptoms were evaluated by the Current Symptom Questionnaire (CSQ). There was a significant time effect on CSQ symptoms during both visits (Friedman test, P \< 0.001), with immediately after repeated STARE and 30 minutes later significantly differing from before STARE (Wilcoxon, P \< 0.017). Tear breakup occurred in every trial, ranging from 25\% to 88\% of the exposed corneal area and all subjects indicated discomfort during trials. There was a significant time effect on mechanical thresholds between before STARE mechanical thresholds and 30 minutes later (repeated measures analysis of variance [ANOVA] P \< 0.001), but not cold (P = 0.057) or chemical (P = 0. 565) thresholds. In this study, tear breakup during STARE trials was associated with discomfort, which when repeated, resulted in increased symptoms of ocular discomfort and alterations of mechanical sensory thresholds after 30 minutes. These results suggest that tear film instability, which is thought to occur repeatedly during normal blinking among dry eye patients over the day, can produce neurosensory alterations.},
file = {/Users/driscoll/Dropbox/library/Journal Article/Awisi-Gyau et al-2019-Changes in Corneal Detection Thresholds After Repeated Tear Film Instability.pdf;/Users/driscoll/Zotero/storage/F38QZJMS/article.html}
}
@article{aydemirEffectPolar2011,
title = {The {{Effect}} of {{Polar Lipids}} on {{Tear Film Dynamics}}},
author = {Aydemir, E. and Breward, C. J. W. and Witelski, T. P.},
year = {2011},
month = jun,
journal = {Bulletin of Mathematical Biology},
volume = {73},
number = {6},
pages = {1171--1201},
issn = {0092-8240, 1522-9602},
doi = {10.1007/s11538-010-9555-y},
urldate = {2020-07-11},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Aydemir_Breward_Witelski-2011-The Effect of Polar Lipids on Tear Film Dynamics2.pdf}
}
@article{Babuska2010,
title = {A Stochastic Collocation Method for Elliptic Partial Differential Equations with Random Input Data},
author = {Babu{\v s}ka, Ivo and Nobile, Fabio and Tempone, Ra{\'u}l},
year = {2010},
month = jan,
journal = {SIAM Review},
volume = {52},
number = {2},
pages = {317--355},
publisher = {{Society for Industrial {{\&}} Applied Mathematics (SIAM)}},
doi = {10.1137/100786356},
file = {/Users/driscoll/Dropbox/library/Journal Article/Babuška et al_2010_A stochastic collocation method for elliptic partial differential equations.pdf}
}
@article{baerSingularHopfBifurcation1986,
title = {Singular {{Hopf Bifurcation}} to {{Relaxation Oscillations}}},
author = {Baer, S. M. and Erneux, T.},
year = {1986},
month = oct,
journal = {SIAM Journal on Applied Mathematics},
volume = {46},
number = {5},
pages = {721--739},
issn = {0036-1399, 1095-712X},
doi = {10.1137/0146047},
urldate = {2020-06-18},
langid = {english}
}
@article{bagbabaAutomatedGrading2018,
title = {An {{Automated Grading}} and {{Diagnosis System}} for {{Evaluation}} of {{Dry Eye Syndrome}}},
author = {Ba{\u g}baba, Ay{\c s}e and {\c S}en, Baha and Delen, Dursun and Uysal, Bet{\"u}l Seher},
year = {2018},
month = oct,
journal = {Journal of Medical Systems},
volume = {42},
number = {11},
pages = {227},
issn = {1573-689X},
doi = {10.1007/s10916-018-1086-3},
abstract = {This article describes methods used to determine the severity of Dry Eye Syndrome (DES) based on Oxford Grading Schema (OGS) automatically by developing and applying a decider model. The number of dry punctate dots occurred on corneal surface after corneal fluorescein staining can be used as a diagnostic indicator of DES severity according to OGS; however, grading of DES severity exactly by carefully assessing these dots is a rather difficult task for humans. Taking into account that current methods are also subjectively dependent on the perception of the ophtalmologists coupled with the time and resource intensive requirements, enhanced diagnosis techniques would greatly contribute to clinical assessment of DES. Automated grading system proposed in this study utilizes image processing methods in order to provide more objective and reliable diagnostic results for DES. A total of 70 fluorescein-stained cornea images from 20 patients with mild, moderate, or severe DES (labeled by an ophthalmologist in the Keratoconus Center of Yildirim Beyazit University Ataturk Training and Research Hospital) used as the participants for the study. Correlations between the number of dry punctate dots and DES severity levels were determined. When automatically created scores and clinical scores were compared, the following measures were observed: Pearson's correlation value between the two was 0.981; Lin's Concordance Correlation Coefficients (CCC) was 0.980; and 95\% confidence interval limites were 0.963 and 0.989. The automated DES grade was estimated from the regression fit and accordingly the unknown grade is calculated with the following formula: Gpred~=\,1.3244 log(Ndots) - 0.0612. The study has shown the viability and the utility of a highly successful automated DES diagnostic system based on OGS, which can be developed by working on the fluorescein-stained cornea images. 
Proper implemention of a computationally savvy and highly accurate classification system, can assist investigators to perform more objective and faster DES diagnoses in real-world scenerios.},
langid = {english},
pmid = {30298212},
keywords = {Cornea,Corneal images,Dry eye,Dry Eye Syndromes,Female,Fluorescein,Fluorescein staining,Fluorophotometry,Health Status Indicators,Humans,Image processing,Male,Oxford grading scale}
}
@article{BaggettMostlyLinear1995,
title = {A Mostly Linear Model of Transition to Turbulence},
author = {Baggett, Jeffrey S. and Driscoll, Tobin A. and Trefethen, Lloyd N.},
year = {1995},
journal = {Physics of Fluids},
volume = {7},
number = {4},
pages = {833--838},
doi = {10.1063/1.868606},
copyright = {All rights reserved}
}
@unpublished{Bailey2006u,
title = {Tanh-Sinh High-Precision Quadrature},
author = {Bailey, David H.},
annotation = {19 Jan 2006},
file = {/Users/driscoll/Dropbox/library/Manuscript/Bailey_Tanh-sinh high-precision quadrature.pdf}
}
@article{BaileyComparisonThree2005,
title = {A Comparison of Three High-Precision Quadrature Schemes},
author = {Bailey, David H. and Jeyabalan, Karthik and Li, Xiaoye S.},
year = {2005},
month = jan,
journal = {Experimental Mathematics},
volume = {14},
number = {3},
pages = {317--329},
publisher = {{Informa UK Limited}},
doi = {10.1080/10586458.2005.10128931},
file = {/Users/driscoll/Dropbox/library/Journal Article/Bailey et al_2005_A comparison of three high-precision quadrature schemes.pdf;/Users/driscoll/Dropbox/library/Journal Article/Bailey_Jeyabalan_Li-2005-A Comparison of Three High-Precision Quadrature Schemes.pdf}
}
@article{BakerLowrankIncremental2012,
title = {Low-Rank Incremental Methods for Computing Dominant Singular Subspaces},
author = {Baker, C. G. and Gallivan, K. A. and Van Dooren, P.},
year = {2012},
month = apr,
journal = {Linear Algebra and its Applications},
series = {Special {{Issue}} Dedicated to {{Danny Sorensen}}'s 65th Birthday},
volume = {436},
number = {8},
pages = {2866--2888},
issn = {0024-3795},
doi = {10.1016/j.laa.2011.07.018},
urldate = {2022-06-03},
abstract = {Computing the singular values and vectors of a matrix is a crucial kernel in numerous scientific and industrial applications. As such, numerous methods have been proposed to handle this problem in a computationally efficient way. This paper considers a family of methods for incrementally computing the dominant SVD of a large matrix A. Specifically, we describe a unification of a number of previously independent methods for approximating the dominant SVD after a single pass through A. We connect the behavior of these methods to that of a class of optimization-based iterative eigensolvers on ATA. An iterative procedure is proposed which allows the computation of an accurate dominant SVD using multiple passes through A. We present an analysis of the convergence of this iteration and provide empirical demonstration of the proposed method on both synthetic and benchmark data.},
langid = {english},
keywords = {Convergence analysis,Incremental SVD,Iterative methods,Pass-efficient linear algebra,Singular value decomposition},
file = {/Users/driscoll/Dropbox/library/Journal Article/Baker et al-2012-Low-rank incremental methods for computing dominant singular subspaces.pdf;/Users/driscoll/Zotero/storage/W4JUGWE8/Baker et al. - 2012 - Low-rank incremental methods for computing dominan.pdf;/Users/driscoll/Zotero/storage/2X8I4NMP/S0024379511005301.html}
}
@article{BaltenspergerExponentialConvergence1999,
title = {Exponential Convergence of a Linear Rational Interpolant between Transformed {{Chebyshev}} Points},
author = {Baltensperger, Richard and Berrut, Jean-Paul and No{\"e}l, Benjamin},
year = {1999},
month = feb,
journal = {Mathematics of Computation},
volume = {68},
number = {227},
pages = {1109--1120},
issn = {0025-5718, 1088-6842},
doi = {10.1090/S0025-5718-99-01070-4},
urldate = {2023-04-18},
abstract = {In 1988 the second author presented experimentally well-conditioned linear rational functions for global interpolation. We give here arrays of nodes for which one of these interpolants converges exponentially for analytic functions},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Baltensperger et al_1999_Exponential convergence of a linear rational interpolant between transformed.pdf}
}
@article{Bandlitz_Time_2014,
title = {Time Course of Changes in Tear Meniscus Radius and Blink Rate after Instillation of Artificial Tears},
author = {Bandlitz, Stefan and Purslow, Christine and Murphy, Paul J and Pult, Heiko},
year = {2014},
journal = {Investigative Ophthalmology \& Visual Science},
volume = {55},
number = {9},
pages = {5842},
issn = {1552-5783},
doi = {10.1167/iovs.14-14844}
}
@article{Bartels1972,
title = {Solution of the Matrix Equation {$AX + XB = C$}},
author = {Bartels, R. H. and Stewart, G. W.},
year = {1972},
month = sep,
journal = {Communications of the ACM},
volume = {15},
number = {9},
pages = {820--826},
publisher = {{Association for Computing Machinery (ACM)}},
doi = {10.1145/361573.361582},
file = {/Users/driscoll/Dropbox/library/Journal Article/Bartels_Stewart_1972_Solution of the matrix equation AX+XB=C.pdf}
}
@article{Barton1971,
title = {The Automatic Solution of Systems of Ordinary Differential Equations by the Method of {{Taylor}} Series},
author = {Barton, D.},
year = {1971},
month = mar,
journal = {The Computer Journal},
volume = {14},
number = {3},
pages = {243--248},
publisher = {{Oxford University Press (OUP)}},
doi = {10.1093/comjnl/14.3.243},
file = {/Users/driscoll/Dropbox/library/Journal Article/Barton_1971_The automatic solution of systems of ordinary differential equations by the.pdf}
}
@article{Baszenski1997,
title = {Fast Polynomial Multiplication and Convolutions Related to the Discrete Cosine Transform},
author = {Baszenski, G{\"u}nter and Tasche, Manfred},
year = {1997},
month = feb,
journal = {Linear Algebra and its Applications},
volume = {252},
number = {1-3},
pages = {1--25},
publisher = {{Elsevier BV}},
doi = {10.1016/0024-3795(95)00696-6},
file = {/Users/driscoll/Dropbox/library/Journal Article/Baszenski_Tasche_1997_Fast polynomial multiplication and convolutions related to the discrete cosine.pdf}
}
@article{Bauer2000,
title = {Numerical Methods for Optimum Experimental Design in {{DAE}} Systems},
author = {Bauer, Irene and Bock, Hans Georg and K{\"o}rkel, Stefan and Schl{\"o}der, Johannes P.},
year = {2000},
month = aug,
journal = {Journal of Computational and Applied Mathematics},
volume = {120},
number = {1-2},
pages = {1--25},
publisher = {{Elsevier BV}},
doi = {10.1016/s0377-0427(00)00300-9},
keywords = {Chemical reaction kinetics,Direct approach,Internal numerical di erentiation,Nonlinear DAE models,Optimum experimental design,Parameter estimation},
file = {/Users/driscoll/Dropbox/library/Journal Article/Bauer et al_2000_Numerical methods for optimum experimental design in DAE systems.pdf}
}
@article{bayonaRolePolynomialsRBFFD2017,
title = {On the Role of Polynomials in {{RBF-FD}} Approximations: {{II}}. {{Numerical}} Solution of Elliptic {{PDEs}}},
shorttitle = {On the Role of Polynomials in {{RBF-FD}} Approximations},
author = {Bayona, Victor and Flyer, Natasha and Fornberg, Bengt and Barnett, Gregory A.},
year = {2017},
month = mar,
journal = {Journal of Computational Physics},
volume = {332},
pages = {257--273},
issn = {00219991},
doi = {10.1016/j.jcp.2016.12.008},
urldate = {2022-08-01},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Bayona et al-2017-On the role of polynomials in RBF-FD approximations.pdf}
}
@article{bayonaRolePolynomialsRBFFD2019,
title = {On the Role of Polynomials in {{RBF-FD}} Approximations: {{III}}. {{Behavior}} near Domain Boundaries},
shorttitle = {On the Role of Polynomials in {{RBF-FD}} Approximations},
author = {Bayona, V{\'i}ctor and Flyer, Natasha and Fornberg, Bengt},
year = {2019},
month = mar,
journal = {Journal of Computational Physics},
volume = {380},
pages = {378--399},
issn = {00219991},
doi = {10.1016/j.jcp.2018.12.013},
urldate = {2022-08-01},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Bayona et al-2019-On the role of polynomials in RBF-FD approximations.pdf}
}
@article{Beatson1998,
title = {Fast Evaluation of Radial Basis Functions: Moment-Based Methods},
author = {Beatson, R. K. and Newsam, G. N.},
year = {1998},
month = sep,
journal = {SIAM Journal on Scientific Computing},
volume = {19},
number = {5},
pages = {1428--1449},
publisher = {{Society for Industrial {{\&}} Applied Mathematics (SIAM)}},
doi = {10.1137/s1064827595293569},
file = {/Users/driscoll/Dropbox/library/Journal Article/Beatson_Newsam_1998_Fast evaluation of radial basis functions.pdf}
}
@article{Beatson2001,
title = {Fast Solution of the Radial Basis Function Interpolation Equations: Domain Decomposition Methods},
author = {Beatson, R. K. and Light, W. A. and Billings, S.},
year = {2001},
month = jan,
journal = {SIAM Journal on Scientific Computing},
volume = {22},
number = {5},
pages = {1717--1740},
publisher = {{Society for Industrial {{\&}} Applied Mathematics (SIAM)}},
doi = {10.1137/s1064827599361771},
file = {/Users/driscoll/Dropbox/library/Journal Article/Beatson et al_2001_Fast solution of the radial basis function interpolation equations.pdf}
}
@article{Beatson2010,
title = {Error Bounds for Anisotropic {{RBF}} Interpolation},
author = {Beatson, Rick and Davydov, Oleg and Levesley, Jeremy},
year = {2010},
journal = {Journal of Approximation Theory},
volume = {162},
number = {3},
pages = {512--527},
issn = {0021-9045},
doi = {10.1016/j.jat.2009.08.004},
mrnumber = {2600981},
keywords = {41A05 (65D05)}
}
@article{beattieSamplingfreeModelReduction2020,
title = {Sampling-Free Model Reduction of Systems with Low-Rank Parameterization},
author = {Beattie, Christopher and Gugercin, Serkan and Tomljanovi{\'c}, Zoran},
year = {2020},
month = nov,
journal = {Advances in Computational Mathematics},
volume = {46},
number = {6},
pages = {83},
issn = {1572-9044},
doi = {10.1007/s10444-020-09825-8},
urldate = {2021-03-22},
abstract = {We consider the reduction of parametric families of linear dynamical systems having an affine parameter dependence that allow for low-rank variation in the state matrix. Usual approaches for parametric model reduction typically involve exploring the parameter space to identify representative parameter values and the associated models become the principal focus of model reduction methodology. These models are then combined in various ways in order to interpolate the response. The initial exploration of the parameter space can be a forbiddingly expensive task. A different approach is proposed here that requires neither parameter sampling nor parameter space exploration. Instead, we represent the system response function as a composition of four subsystem response functions that are non-parametric with a purely parameter-dependent function. One may apply any one of a number of standard (non-parametric) model reduction strategies to reduce the subsystems independently, and then conjoin these reduced models with the underlying parameterization to obtain the overall parameterized response. Our approach has elements in common with the parameter mapping approach of Baur et al. (PAMM 14(1), 19--22 2014) but offers greater flexibility and potentially greater control over accuracy. In particular, a data-driven variation of our approach is described that exercises this flexibility through the use of limited frequency-sampling of the underlying non-parametric models. The parametric structure of our system representation allows for a priori guarantees of system stability in the resulting reduced models across the full range of parameter values. Incorporation of system theoretic error bounds allows us to determine appropriate approximation orders for the non-parametric systems sufficient to yield uniformly high accuracy across the parameter range. 
We illustrate our approach on a class of structural damping optimization problems and on a benchmark model of thermal conduction in a semiconductor chip. The parametric structure of our reduced system representation lends itself very well to the development of optimization strategies making use of efficient cost function surrogates. We discuss this in some detail for damping parameter and location optimization for vibrating structures.},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Beattie et al_2020_Sampling-free model reduction of systems with low-rank parameterization.pdf}
}
@article{Bebendorf2010,
title = {Adaptive Cross Approximation of Multivariate Functions},
author = {Bebendorf, M.},
year = {2010},
month = jun,
journal = {Constructive Approximation},
volume = {34},
number = {2},
pages = {149--179},
publisher = {{Springer Nature}},
doi = {10.1007/s00365-010-9103-x},
file = {/Users/driscoll/Dropbox/library/Journal Article/Bebendorf_2010_Adaptive cross approximation of multivariate functions.pdf}
}
@article{begleyQuantitativeAnalysis2013,
title = {Quantitative {{Analysis}} of {{Tear Film Fluorescence}} and {{Discomfort During Tear Film Instability}} and {{Thinning}}},
author = {Begley, Carolyn G and Simpson, Trefford and Liu, Haixia and Salvo, Eliza and Wu, Ziwei and Bradley, Arthur and Situ, Ping},
year = {2013},
month = apr,
journal = {Investigative Ophthalmology \& Visual Science},
volume = {54},
number = {4},
pages = {2645--2653},
publisher = {{The Association for Research in Vision and Ophthalmology}},
issn = {1552-5783},
doi = {10.1167/iovs.12-11299},
urldate = {2020-11-24},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Journal Article/Begley et al-2013-Quantitative Analysis of Tear Film Fluorescence and.pdf;/Users/driscoll/Zotero/storage/IRKCX9EK/article.html}
}
@inproceedings{bekkermanMultiwayDistributionalClustering2005,
title = {Multi-Way Distributional Clustering via Pairwise Interactions},
booktitle = {Proceedings of the 22nd International Conference on {{Machine}} Learning - {{ICML}} '05},
author = {Bekkerman, Ron and {El-Yaniv}, Ran and McCallum, Andrew},
year = {2005},
pages = {41--48},
publisher = {{ACM Press}},
address = {{Bonn, Germany}},
doi = {10.1145/1102351.1102357},
urldate = {2019-11-18},
abstract = {We present a novel unsupervised learning scheme that simultaneously clusters variables of several types (e.g., documents, words and authors) based on pairwise interactions between the types, as observed in co-occurrence data. In this scheme, multiple clustering systems are generated aiming at maximizing an objective function that measures multiple pairwise mutual information between cluster variables. To implement this idea, we propose an algorithm that interleaves top-down clustering of some variables and bottom-up clustering of the other variables, with a local optimization correction routine. Focusing on document clustering we present an extensive empirical study of two-way, three-way and four-way applications of our scheme using six real-world datasets including the 20 Newsgroups (20NG) and the Enron email collection. Our multi-way distributional clustering (MDC) algorithms consistently and significantly outperform previous state-of-the-art information theoretic clustering algorithms.},
isbn = {978-1-59593-180-1},
langid = {english},
file = {/Users/driscoll/Dropbox/library/Conference Paper/Bekkerman et al_2005_Multi-way distributional clustering via pairwise interactions.pdf}
}
@article{Belmonte_Cold_2011,
title = {Cold Thermoreceptors, Unexpected Players in Tear Production and Ocular Dryness Sensations.},
author = {Belmonte, Carlos and Gallar, Juana},
year = {2011},
journal = {Investigative Ophthalmology \& Visual Science},
volume = {52},
number = {6},
pages = {3888--3892},
issn = {1552-5783},
doi = {10.1167/iovs.09-5119},
pmid = {21632706}
}
@inproceedings{ben-davidMeasuresClusteringQuality2009,
title = {Measures of {{Clustering Quality}}: {{A Working Set}} of {{Axioms}} for {{Clustering}}},
author = {{Ben-David}, Shai and Ackerman, Margareta},
year = {2009},
booktitle = {Advances in Neural Information Processing Systems},
pages = {121--128},
abstract = {Aiming towards the development of a general clustering theory, we discuss abstract axiomatization for clustering. In this respect, we follow up on the work of Kleinberg, ([1]) that showed an impossibility result for such axiomatization. We argue that an impossibility result is not an inherent feature of clustering, but rather, to a large extent, it is an artifact of the specific formalism used in [1].},
langid = {english},
keywords = {No DOI found},
file = {/Users/driscoll/Dropbox/library/Journal Article/Ben-David_Ackerman_2009_Measures of Clustering Quality.pdf}
}
@article{Berenger1994,
author = {Berenger, Jean-Pierre},
title = {A Perfectly Matched Layer for the Absorption of Electromagnetic Waves},
journal = {Journal of Computational Physics},
year = {1994},
month = oct,
volume = {114},
number = {2},
pages = {185--200},
publisher = {{Elsevier BV}},
doi = {10.1006/jcph.1994.1159},
file = {/Users/driscoll/Dropbox/library/Journal Article/Berenger_1994_A perfectly matched layer for the absorption of electromagnetic waves.pdf}
}
@article{Berenger1996,
author = {Berenger, Jean-Pierre},
title = {Three-Dimensional Perfectly Matched Layer for the Absorption of Electromagnetic Waves},
journal = {Journal of Computational Physics},
year = {1996},
month = sep,
volume = {127},
number = {2},
pages = {363--379},
publisher = {{Elsevier BV}},
doi = {10.1006/jcph.1996.0181},
file = {/Users/driscoll/Dropbox/library/Journal Article/Berenger_1996_Three-dimensional perfectly matched layer for the absorption of electromagnetic.pdf}
}
@article{bergenDetectingEarthquakesSeismic2018,
title = {Detecting Earthquakes over a Seismic Network Using Single-Station Similarity Measures},
author = {Bergen, Karianne J. and Beroza, Gregory C.},
year = {2018},
month = jun,
journal = {Geophysical Journal International},
volume = {213},
number = {3},
pages = {1984--1998},
publisher = {{Oxford Academic}},
issn = {0956-540X},
doi = {10.1093/gji/ggy100},
urldate = {2020-03-24},
abstract = {New blind waveform-similarity-based detection methods, such as Fingerprint and Similarity Thresholding (FAST), have shown promise for detecting weak signals {\ldots}},
langid = {english},
file = {/Users/driscoll/Zotero/storage/XKG4X4MP/Bergen and Beroza - 2018 - Detecting earthquakes over a seismic network using.pdf;/Users/driscoll/Zotero/storage/E9NUYGCS/4939266.html}
}
@article{Berger_EFFECT_1974,
title = {Effect of Contact Lens Motion on the Oxygen Tension Distribution under the Lens},
author = {Berger, R. E.},
year = {1974},
journal = {Optometry \& Vision Science},
doi = {10.1097/00006324-197407000-00001},
abstract = {A method of predicting the oxygen tension distribution under a contact lens is illustrated for some simple, but practical, lens motions. A hydrodynamic theory is used to find those regions which receive fresh tear fluid during a blink. Then a diffusion model is ...}
}
@article{Berke_The_1998,
title = {The Kinetics of Lid Motion and Its Effects on the Tear Film},
author = {Berke, A. and Mueller, S.},
year = {1998},
journal = {Advances in Experimental Medicine and Biology},
volume = {438},
pages = {417--424},
issn = {0065-2598},
pmid = {9634916},
internal-note = {duplicate of berkeKineticsLidMotion1998; both keys retained pending a check of which is cited}
}
@incollection{berkeKineticsLidMotion1998,
title = {The {{Kinetics}} of {{Lid Motion}} and Its {{Effects}} on the {{Tear Film}}},
booktitle = {Lacrimal {{Gland}}, {{Tear Film}}, and {{Dry Eye Syndromes}} 2},
author = {Berke, A. and Mueller, S.},
editor = {Sullivan, David A. and Dartt, Darlene A. and Meneray, Michele A.},
year = {1998},
series = {Advances in Experimental Medicine and Biology},
volume = {438},
pages = {417--424},
publisher = {{Springer US}},
address = {{Boston, MA}},
doi = {10.1007/978-1-4615-5359-5_58},
urldate = {2020-06-18},
isbn = {978-1-4613-7445-9 978-1-4615-5359-5},
langid = {english}
}
@article{Berland2007,
title = {{{EXPINT}}---{{A MATLAB}} Package for Exponential Integrators},
author = {Berland, H{\aa}vard and Skaflestad, B{\aa}rd and Wright, Will M.},
year = {2007},
month = mar,
journal = {ACM Transactions on Mathematical Software},
volume = {33},
number = {1},
pages = {4},
publisher = {{Association for Computing Machinery (ACM)}},
doi = {10.1145/1206040.1206044},
file = {/Users/driscoll/Dropbox/library/Journal Article/Berland et al_2007_EXPINT—A MATLAB package for exponential integrators.pdf}
}
@article{Berljafa2017,
title = {The {{RKFIT}} Algorithm for Nonlinear Rational Approximation},
author = {Berljafa, Mario and G{\"u}ttel, Stefan},
year = {2017},