Commit
- Loading branch information
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,6 +19,4 @@ MyMakevars | |
*.Rproj | ||
|
||
/packages/nimble/inst/CppCode/Makeconf | ||
/packages/nimble/inst/include/cppad/ | ||
|
||
|
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,101 @@ | ||
|
||
@book{griewank-walther-08,
  address    = {Philadelphia, PA},
  edition    = {Second},
  title      = {Evaluating {Derivatives}: {Principles} and {Techniques} of {Algorithmic} {Differentiation}},
  isbn       = {978-0-89871-659-7},
  shorttitle = {Evaluating {Derivatives}},
  abstract   = {Algorithmic, or automatic, differentiation (AD) is a growing area of theoretical research and software development concerned with the accurate and efficient evaluation of derivatives for function evaluations given as computer programs. The resulting derivative values are useful for all scientific computations that are based on linear, quadratic, or higher order approximations to nonlinear scalar or vector functions. AD has been applied in particular to optimization, parameter identification, nonlinear equation solving, the numerical integration of differential equations, and combinations of these. Apart from quantifying sensitivities numerically, AD also yields structural dependence information, such as the sparsity pattern and generic rank of Jacobian matrices. The field opens up an exciting opportunity to develop new algorithms that reflect the true cost of accurate derivatives and to use them for improvements in speed and reliability. This second edition has been updated and expanded to cover recent developments in applications and theory, including an elegant NP completeness argument by Uwe Naumann and a brief introduction to scarcity, a generalization of sparsity. There is also added material on checkpointing and iterative differentiation. To improve readability the more detailed analysis of memory and complexity bounds has been relegated to separate, optional chapters. The book consists of three parts: a stand-alone introduction to the fundamentals of AD and its software; a thorough treatment of methods for sparse problems; and final chapters on program-reversal schedules, higher derivatives, nonsmooth problems and iterative processes. Each of the 15 chapters concludes with examples and exercises. Audience: This volume will be valuable to designers of algorithms and software for nonlinear computational problems. Current numerical software users should gain the insight necessary to choose and deploy existing AD software tools to the best advantage.},
  language   = {English},
  publisher  = {Society for Industrial and Applied Mathematics},
  author     = {Griewank, Andreas and Walther, Andrea},
  month      = sep,
  year       = {2008},
}
|
||
@incollection{neal-11,
  series    = {Chapman \& {Hall}/{CRC} {Handbooks} of {Modern} {Statistical} {Methods}},
  title     = {{MCMC} {Using} {Hamiltonian} {Dynamics}},
  isbn      = {978-1-4200-7941-8},
  url       = {http://www.crcnetbase.com/doi/abs/10.1201/b10905-6},
  urldate   = {2014-12-08},
  booktitle = {Handbook of {Markov} {Chain} {Monte} {Carlo}},
  publisher = {Chapman and Hall/CRC},
  editor    = {Brooks, Steve and Gelman, Andrew and Jones, Galin L. and Meng, Xiao-Li},
  author    = {Neal, Radford M.},
  year      = {2011},
}
|
||
@manual{borchers-22,
  title  = {{pracma}: Practical Numerical Math Functions},
  author = {Hans W. Borchers},
  year   = {2022},
  note   = {R package version 2.3.8},
  url    = {https://CRAN.R-project.org/package=pracma},
}
|
||
@manual{gilbert-varadhan-19,
  title  = {{numDeriv}: Accurate Numerical Derivatives},
  author = {Paul Gilbert and Ravi Varadhan},
  year   = {2019},
  note   = {R package version 2016.8-1.1},
  url    = {https://CRAN.R-project.org/package=numDeriv},
}
|
||
@misc{bell-22,
  title  = {{CppAD}: {A} {Package} for {Differentiation} of {C}++ {Algorithms}},
  url    = {https://www.coin-or.org/CppAD},
  author = {Bell, Bradley M.},
  year   = {2022},
}
|
||
@article{kristensen-etal-16,
  title   = {{TMB}: Automatic Differentiation and {Laplace} Approximation},
  author  = {Kasper Kristensen and Anders Nielsen and Casper W. Berg and Hans Skaug and Bradley M. Bell},
  journal = {Journal of Statistical Software},
  year    = {2016},
  volume  = {70},
  number  = {5},
  pages   = {1--21},
  doi     = {10.18637/jss.v070.i05},
}
|
||
|
||
@article{skaug-fournier-06,
  title    = {Automatic approximation of the marginal likelihood in non-{Gaussian} hierarchical models},
  volume   = {51},
  issn     = {0167-9473},
  url      = {https://www.sciencedirect.com/science/article/pii/S0167947306000764},
  doi      = {10.1016/j.csda.2006.03.005},
  abstract = {Fitting of non-Gaussian hierarchical random effects models by approximate maximum likelihood can be made automatic to the same extent that Bayesian model fitting can be automated by the program BUGS. The word ``automatic'' means that the technical details of computation are made transparent to the user. This is achieved by combining a technique from computer science known as ``automatic differentiation'' with the Laplace approximation for calculating the marginal likelihood. Automatic differentiation, which should not be confused with symbolic differentiation, is mostly unknown to statisticians, and hence basic ideas and results are reviewed. The computational performance of the approach is compared to that of existing mixed-model software on a suite of datasets selected from the mixed-model literature.},
  language = {en},
  number   = {2},
  urldate  = {2022-06-26},
  journal  = {Computational Statistics \& Data Analysis},
  author   = {Skaug, Hans J. and Fournier, David A.},
  month    = nov,
  year     = {2006},
  keywords = {AD Model Builder, Automatic differentiation, Importance sampling, Laplace approximation, Mixed models, Random effects},
  pages    = {699--709},
}
|
||
@article{fournier-etal-12,
  title      = {{AD} {Model} {Builder}: using automatic differentiation for statistical inference of highly parameterized complex nonlinear models},
  volume     = {27},
  issn       = {1055-6788},
  shorttitle = {{AD} {Model} {Builder}},
  url        = {http://dx.doi.org/10.1080/10556788.2011.597854},
  doi        = {10.1080/10556788.2011.597854},
  abstract   = {Many criteria for statistical parameter estimation, such as maximum likelihood, are formulated as a nonlinear optimization problem. Automatic Differentiation Model Builder (ADMB) is a programming framework based on automatic differentiation, aimed at highly nonlinear models with a large number of parameters. The benefits of using AD are computational efficiency and high numerical accuracy, both crucial in many practical problems. We describe the basic components and the underlying philosophy of ADMB, with an emphasis on functionality found in no other statistical software. One example of such a feature is the generic implementation of Laplace approximation of high-dimensional integrals for use in latent variable models. We also review the literature in which ADMB has been used, and discuss future development of ADMB as an open source project. Overall, the main advantages of ADMB are flexibility, speed, precision, stability and built-in methods to quantify uncertainty.},
  number     = {2},
  urldate    = {2015-02-20},
  journal    = {Optimization Methods and Software},
  author     = {Fournier, David A. and Skaug, Hans J. and Ancheta, Johnoel and Ianelli, James and Magnusson, Arni and Maunder, Mark N. and Nielsen, Anders and Sibert, John},
  month      = apr,
  year       = {2012},
  pages      = {233--249},
}