diff --git a/Makefile b/Makefile index 16ce008..30781e6 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,8 @@ test36: .venv/py36/bin/py.test ## run tests for Python 3.6 test37: .venv/py37/bin/py.test ## run tests for Python 3.7 $(TESTENV) $< -v $(TESTOPTIONS) src tests docs/*.rst -.venv/py37/bin/sphinx-build: .venv/py37/bin/py.test + +.venv/py36/bin/sphinx-build: .venv/py36/bin/py.test docs: .venv/py36/bin/sphinx-build ## generate Sphinx HTML documentation, including API docs $(MAKE) -C docs SPHINXBUILD=../.venv/py36/bin/sphinx-build clean diff --git a/README.rst b/README.rst index dce485d..0ac6e53 100644 --- a/README.rst +++ b/README.rst @@ -26,15 +26,33 @@ The newtonprop Python package Pure Python reference implementation of the Newton propagator for quantum dynamics. -Development of `newtonprop` happens on `Github`_. +The Newton propagator evaluates an expansion of the time evolution operator +:math:`e^{-i \Op{H} dt}` or :math:`e^{\Liouville dt}` in Newton polynomials, +using an implicitly restarted Arnoldi scheme. More generally, it can evaluate +the application of any operator-valued function to a state. + +Development of the ``newtonprop`` package happens on `Github`_. You can read the full documentation at `ReadTheDocs`_. +.. Warning:: + + This is a reference implementation only. It aims to be easy to understand, + so that that it can guide the implementation of the algorithm in a compiled + language, and to provide a baseline against which to test such an + implementation. Being written in pure Python, it runs several orders of + magnitude slower than an implementation in a compiled language. Thus, it + cannot compete e.g. with the `ODE solvers provided by SciPy`_ (which run at + C speed), even though the Newton propagator is usually expected to be + superior in runtime, memory usage, and precision. + .. _ReadTheDocs: https://newtonprop.readthedocs.io/en/latest/ +.. 
_ODE solvers provided by SciPy: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.integrate.ode.html Installation ------------ + To install the latest released version of ``newtonprop``, run this command in your terminal: .. code-block:: console @@ -43,14 +61,14 @@ To install the latest released version of ``newtonprop``, run this command in yo This is the preferred method to install ``newtonprop``, as it will always install the most recent stable release. -If you don't have `pip`_ installed, this `Python installation guide`_ can guide +If you don't have `pip`_ installed, the `Python installation guide`_ can guide you through the process. .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ -To install the latest development version of ``newtonprop`` from `Github`_. +To install the latest development version of ``newtonprop`` from `Github`_: .. code-block:: console diff --git a/docs/_static/mycss.css b/docs/_static/mycss.css index e5708ee..f1965eb 100644 --- a/docs/_static/mycss.css +++ b/docs/_static/mycss.css @@ -16,4 +16,24 @@ div.figure p.caption { div.section p { clear: left; -} \ No newline at end of file +} + +/* fix equation numbers in RTD */ + +span.eqno { + float: right; + margin-left: 5px; +} + +.headerlink { + display: none; + visibility: hidden; +} +.headerlink::after{ + visibility: visible; + display: inline-block; +} +.headerlink::hover{ + display: inline-block; + visibility: visible; +} diff --git a/docs/conf.py b/docs/conf.py index db64316..0ece6ca 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -55,14 +55,13 @@ def run_apidoc(_): 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', 'sphinx.ext.todo', - 'sphinx.ext.inheritance_diagram', 'dollarmath', + 'nbsphinx', + 'sphinx.ext.inheritance_diagram', 'sphinx_autodoc_typehints', + 'sphinxcontrib.bibtex', ] -extensions.append('nbsphinx') - - if os.getenv('SPELLCHECK'): extensions += 'sphinxcontrib.spelling', 
spelling_show_suggestions = True @@ -76,6 +75,7 @@ def run_apidoc(_): 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 'matplotlib': ('https://matplotlib.org/', None), + 'qutip': ('http://qutip.org/docs/latest/', None), } # Add any paths that contain templates here, relative to this directory. @@ -127,6 +127,8 @@ def run_apidoc(_): 'diag': ['{\\operatorname{diag}}', 0], 'abs': ['{\\operatorname{abs}}', 0], 'pop': ['{\\operatorname{pop}}', 0], + 'ee': ['{\\text{e}}', 0], + 'ii': ['{\\text{i}}', 0], 'aux': ['{\\text{aux}}', 0], 'opt': ['{\\text{opt}}', 0], 'tgt': ['{\\text{tgt}}', 0], @@ -154,6 +156,9 @@ def run_apidoc(_): 'AbsSq': ['{\\left\\vert#1\\right\\vert^2}', 1], 'Re': ['{\\operatorname{Re}}', 0], 'Im': ['{\\operatorname{Im}}', 0], + 'Real': ['{\\mathbb{R}}', 0], + 'Complex': ['{\\mathbb{C}}', 0], + 'Integer': ['{\\mathbb{N}}', 0], } } } diff --git a/docs/index.rst b/docs/index.rst index 8aabf6a..4bf4cb0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,6 +31,7 @@ Welcome to the newtonprop's Python package's documentation! contributing authors history + newton example.ipynb diff --git a/docs/newton.rst b/docs/newton.rst new file mode 100644 index 0000000..9661860 --- /dev/null +++ b/docs/newton.rst @@ -0,0 +1,315 @@ +The Newton Propagator +===================== + +*The following overview has been adapted from Ref* :cite:`GoerzPhd2015`. + +Quantum mechanical equations of motion +-------------------------------------- + +The behavior of a quantum system is usually described either by the Schrödinger equation + +.. math:: + + \frac{\partial}{\partial t} \ket{\Psi(t)} = -\frac{\ii}{\hbar} \Op{H} \ket{\Psi(t)}, + +or, for open quantum systems, by the Liouville-von-Neumann equation + +.. math:: + + \frac{\partial}{\partial t} \Op{\rho}(t) = \Liouville \Op{\rho}(t). 
+ + +ODE solvers versus series expansion +----------------------------------- + +There are two possible approaches to obtaining a solution. The first is +to simply take the equation of motion and apply one of the generic +numerical methods for solving ordinary differential equations (ODEs), +like one of the Runge-Kutta (RK) +methods :cite:`LambertODEBook,NumRecipesFortran`. + +The alternative approach is to solve the equation of motion +analytically, and then to evaluate that solution numerically. In this +way, results of arbitrary precision can be obtained, with the obvious +caveat that the propagation scheme will be specific to a particular +equation of motion and its solution. + +The Schrödinger equation for a time-independent Hamiltonian has the +solution + +.. math:: + + \Ket{\Psi(t)}= \Op{U}(t,0) \Ket{\Psi(0)} + = \ee^{-\frac{\ii}{\hbar} \Op{H}\, t} \Ket{\Psi(0)}\,. + +Similarly, the Liouville-von-Neumann equation has the solution + +.. math:: + + \Op{\rho}(t) = \DynMap(t,0) \ + = \ee^{\Liouville\, t} \Op{\rho}(0)\,. + + +For time-dependent Hamiltonians, we approximate :math:`\Op{H}(t)` as +piecewise constant on a time grid with time step :math:`dt`. Then, the +total time evolution operator is the product of the time evolution +operators at each time step, + +.. math:: + + \Op{U}[T,0] + = \prod_{i=1}^{nt-1} \underbrace{\Op{U}(t_i+dt, t_i)}_{\equiv \Op{U}_i} + = \prod_{i=1}^{nt-1} \exp\bigg[-\frac{\ii}{\hbar} + \underbrace{\Op{H}\left(t_i + \frac{dt}{2}\right)}_{\equiv \Op{H}_i} dt\bigg]\,. + +The time step :math:`dt` must be chosen sufficiently small that this is +a good approximation; in practice, convergence is checked by verifying +that the numerical results remain stable within a desired precision when +:math:`dt` is decreased. The same applies to the Liouvillian. + +For a system of non-trivial size, +:math:`\exp[-\frac{\ii}{\hbar} \Op{H} dt]` is evaluated by expanding the +exponential in a polynomial series, + +.. 
math:: + + \ee^{-\frac{\ii}{\hbar} \Op{H}\, dt} \Ket{\Psi} + \approx \sum_{n=0}^{N-1} a_n P_n(\Op{H}) \Ket{\Psi}\,. + \label{eq:poly_expansion} + +where :math:`P_n(\Op{H})` is a polynomial of degree :math:`n` and +:math:`\{a_n\}` are the expansion coefficients. Applying +:math:`P_n(\Op{H})` to :math:`\ket{\Psi}` then simply means repeated +applications of :math:`\Op{H}`. For this reason, an efficient +propagation relies on the proper use of sparsity in storing the +Hamiltonian and spectral methods such as the Fourier grid. + +The idea of evaluating the exponential as a polynomial series is already +presupposed by the very definition of the exponential of an operator, + +.. math:: \exp[\Op{A}] \equiv \sum_{n=0}^{\infty} \frac{1}{n!} \Op{A}^n\,. + +However, this expansion converges particularly slowly and is +numerically unstable :cite:`Tal-EzerJCP84`. Thus, it is not +suitable for time propagation. Instead, a polynomial basis must be +chosen such that the series converges quickly and can be truncated as early as +possible. + + +Chebychev expansion +------------------- + +For a function :math:`f(x)` with :math:`x \in [-1, 1]`, it can be +shown :cite:`GilBook2007` that the fastest converging +polynomial series is the expansion in Chebychev polynomials. +When using the Chebychev expansion for propagation, the requirement that +the argument of :math:`f(x)` must be real translates into :math:`\Op{H}` +being Hermitian. Secondly, to account for the requirement that +:math:`x \in [-1, 1]`, the Hamiltonian must be normalized +as :cite:`KosloffJCP88,TannorBook,NdongJCP09` + +.. math:: \Op{H}_{\text{norm}} = 2 \frac{\Op{H} - E_{\min}\,\identity}{\Delta} - \identity\,, + +where :math:`\Delta = E_{\max} - E_{\min}` is the spectral radius and +:math:`E_{\max}` and :math:`E_{\min}` are the largest and smallest +eigenvalues, respectively. + +Newton expansion +---------------- + +For a general function :math:`f(z)` with :math:`z \in \Complex`, expansion into +Chebychev polynomials is not possible. 
For a function defined on an operator, +a complex :math:`z` corresponds to an operator with complex eigenvalues. +This is the case for a non-Hermitian Hamiltonian, and, more importantly, for a +dissipative Liouvillian in the Liouville-von-Neumann equation. In this case, +Newton polynomials must be used. The expansion in Newton polynomials +:math:`R_n(z)` reads + +.. math:: + + f(z) \approx \sum_{n=0}^{N-1} a_n R_n(z)\,, \quad + R_n(z) = \prod_{j=0}^{n-1} \left( z-z_j \right)\,, + +for a set of sampling points :math:`\{z_j\}` at which the interpolation +is exact. The coefficients are defined as the divided differences :cite:`AshkenaziJCP95`, + +.. math:: + :label: divided_differences + + \begin{aligned} + a_0 &= f(z_0)\,, \\ + a_1 &= f(z_1) - f(z_0)\,, \\ + a_n &= \frac{f(z_n) - \sum_{j=0}^{n-1} a_j + \prod_{k=0}^{j-1}\left(z_n - z_k\right)} + {\prod_{j=0}^{n-1} \left(z_n - z_j\right)}\,.\end{aligned} + +For solving the Liouville-von Neumann equation, +:math:`f(z)=\ee^{z \,dt}`, where the argument :math:`z` is +:math:`\Liouville`. Thus, the propagation is written as + +.. math:: + + \Op{\rho} = \ee^{\Liouville \,dt} \, \Op{\rho}_0 + \approx + \underbrace{% + \sum_{n=0}^{N-1} a_n + \prod_{j=0}^{n-1} \left( \Liouville - z_j\identity \right) }_{% + \equiv p_{N-1}(\Liouville)} + \Op{\rho}_0\,, + +where the polynomial is evaluated through repeated application of the Liouvillian. + +The central issue for obtaining a fast-converging series is a proper +choice of the sampling points :math:`\{z_j\}`. The fastest convergence +results from using the complex eigenvalues of +:math:`\Liouville` :cite:`KosloffARPC94`. However, the exact +eigenvalues of the Liouvillian are not readily available. More +generally, arbitrary points from the spectral domain of +:math:`\Liouville` can be used as sampling points. + +A widely used method is to estimate the spectral domain and to encircle +it with a rectangle or ellipse :cite:`BermanJPA92,AshkenaziJCP95,HuisingaJCP99`. 
+Then, a large number of expansion coefficients are +calculated from sampling points on that boundary. The same coefficients +are used for the propagation of all Liouvillians on the time grid under +the assumption that they all fit into the same encirclement. The series +is truncated as soon as convergence is reached. This is similar to the +method employed for the Chebychev propagator, where a set of +coefficients is calculated once and then used for the propagation of any +Hamiltonian that is within the same spectral range. + +A middle path between the exact eigenvalues of :math:`\Liouville` and +the crude encirclement of the spectral domain is the use of the Krylov +method to obtain approximate eigenvalues. +The `Arnoldi algorithm`_ for +:math:`\hat{A} = \Liouville` and using :math:`\vec{v} = \Op{\rho}` as a starting vector +yields a set of approximate eigenvalues of :math:`\Liouville`, as well +as a Hessenberg matrix :math:`\hat{H}` that is the projection of +:math:`\Liouville` into the Krylov subspace, and the set of Arnoldi +vectors that span that subspace. Instead of using :math:`\Liouville` as +the argument of the polynomial :math:`p_{N-1}`, the Hessenberg matrix +may be used. If :math:`\hat{V}_{N}` is the transformation matrix between +the full Liouville space and the reduced Krylov space, consisting of the +Arnoldi vectors as columns, the propagation is evaluated using + +.. math:: + + \Liouville \Op{\rho} + \approx + \hat{V}_{N}\, p_{m-1}(\hat{H}) \, \hat{V}_{N}^{\dagger} \Op{\rho}_0\,. + +Assuming :math:`N` is much smaller than the full dimension of the +Liouville space, most of the numerical effort is in the Arnoldi +algorithm, in constructing the Krylov space. + +.. 
_Arnoldi algorithm: https://en.m.wikipedia.org/wiki/Arnoldi_iteration + + +Newton propagation with an implicitly restarted Arnoldi method +-------------------------------------------------------------- + +However, even for moderate values of :math:`N` (typically on the order +of 100), the Arnoldi algorithm can require prohibitive amounts of +memory. This is because a full set of :math:`N` Arnoldi vectors, each of +the dimension of the Liouville space, needs to be stored. To counter this +problem, an iterative scheme has been +developed :cite:`Tal-EzerSJSC2007`. Instead of performing +the Arnoldi algorithm to a high order :math:`N`, until convergence is +reached in the propagation, we stop at some small order :math:`m<10`. +This gives a first approximation to the propagated density matrix, + +.. math:: + :label: newton1stIter + + \Op{\rho}^{(1)} + = p_{m-1}^{(0)}(\Liouville) \Op{\rho}_0 + = \sum_{n=0}^{m-1} a_n R_n(\Liouville) \Op{\rho}_0\,. + \label{eq:newton1stIter} + +The idea is now to iteratively add remaining terms to the Newton series +in chunks of size :math:`m`, retaining all coefficients and sampling +points, but restarting the Arnoldi procedure in every iteration. + +Adding the next :math:`m` terms to Eq. :eq:`newton1stIter` yields + +.. math:: + + \begin{split} + \Op{\rho}^{(2)} + &= \Op{\rho}^{(1)} + \sum_{n=m}^{2m-1} a_n R_n(\Liouville) \Op{\rho}_{0} \\ + &= \Op{\rho}^{(1)} + + \underbrace{\left(\sum_{n=0}^{m-1} a_{m+n} R_n^{(1)}(\Liouville)\right)}_{% + \equiv p_{m-1}^{(1)} } + \underbrace{\left(R_{m}^{(0)}(\Liouville) \Op{\rho}_{0} \right)}_{% + \equiv \Op{\sigma}^{(1)} }\,, + \end{split} + +with + +.. math:: + + R_n^{(0)}(\Liouville) = \prod_{j=0}^{n-1}(\Liouville - z_{j}\identity), \qquad + R_n^{(1)}(\Liouville) = \prod_{j=0}^{n-1}(\Liouville - z_{n+j}\identity)\,. 
+ +That is, the terms in :math:`R_n(\Liouville)` already known from the +calculation of :math:`\Op{\rho}^{(1)}` have been pulled out, and yield a +new "starting vector" :math:`\Op{\sigma}^{(1)}`, which is the argument +to a Newton series of only :math:`m` new terms. The new sampling points +on which the :math:`R_n^{(1)}` are evaluated are obtained by applying +the Arnoldi procedure to :math:`\Op{\sigma}^{(1)}`. The Newton +coefficients continue recursively from the previous restart. The third +iteration yields + +.. math:: + + \Op{\rho}^{(3)} + = \Op{\rho}^{(2)} + + \underbrace{\left(\sum_{n=0}^{m-1} a_{2m+n} R_n^{(2)}(\Liouville)\right)}_{% + \equiv p_{m-1}^{(2)} } + \underbrace{\left(R_{m}^{(1)}(\Liouville) \Op{\sigma}_{1} \right)}_{% + \equiv \Op{\sigma}^{(2)} }\,. + +The Newton propagator continues, adding the :math:`m` terms evaluating + +.. math:: + + p_{m-1}^{(s)}(\Liouville) \Op{\sigma}^{(s)} + = \sum_{n=0}^{m-1} a_{sm + n} + \prod_{k=0}^{n-1} \left(\Liouville - z_{sm+k} \identity \right) + \Op{\sigma}^{(s)} + +with + +.. math:: \Op{\sigma}^{(s)} = p_{m-1}^{(s-1)} \Op{\sigma}^{(s-1)} + +at every restart iteration. + +In the implementation of the algorithm, there are two details that need +to be taken into account for numerical stability. First, the denominator +of the divided differences in Eq. :eq:`divided_differences` may become extremely small if +consecutive sampling points are close to each other. This can be +addressed by reordering the points such that the denominator in the +divided differences is maximized. This process is called Leja +ordering :cite:`ReichelBIT1990`. The reverse problem that +the sampling points are too far apart, causing an underflow in the +calculation of coefficients can be avoided by normalizing the +Liouvillian as + +.. 
math:: \tilde{\Liouville} = \frac{1}{\rho} \left( \Liouville - c \right)\,, + +where :math:`c` is an estimate for the center of the spectrum of +:math:`\Liouville`, and the eigenvalues are roughly contained in a +radius :math:`\rho` around :math:`c`. These values can be estimated from +the sampling points obtained in the first iteration of the Newton +propagator. The normalization of the Liouvillian is in some sense +similar to the normalization of the Hamiltonian in the Chebychev +propagator, but it is crucial there since the Chebychev polynomials are +only defined in the domain :math:`[-1, 1]`. For the Newton propagator, +the normalization is only for numerical stability. + +References +---------- + +.. bibliography:: refs.bib + :cited: + :style: unsrt diff --git a/docs/refs.bib b/docs/refs.bib new file mode 100644 index 0000000..a2c6b94 --- /dev/null +++ b/docs/refs.bib @@ -0,0 +1,222 @@ +@string{aamop = {Adv. At. Mol. Opt. Phys.}} +@string{aarc = {Autom. Rem. Contr.}} +@string{ac = {Anal. Chem.}} +@string{ajp = {Am. J. Phys.}} +@string{ap = {Adv. Phys.}} +@string{apb = {Appl. Phys. B}} +@string{apx = {Adv. Phys. X}} +@string{arpc = {Annu. Rev. Phys. Chem.}} +@string{atms = {ACM Trans. Math. Softw.}} +@string{bstj = {Bell System Tech. J.}} +@string{cacm = {Commun. ACM}} +@string{cmp = {Commun. Math. Phys.}} +@string{computj = {Comput. J.}} +@string{contp = {Contemp. Phys.}} +@string{cp = {Chem. Phys.}} +@string{cpam = {Commun. Pur. Appl. Math.}} +@string{cpl = {Chem. Phys. Lett.}} +@string{epjb = {Eur. Phys. J. B}} +@string{epjd = {Eur. Phys. J. D}} +@string{epjp = {Eur. Phys. J. Plus}} +@string{epjqt = {EPJ Quantum Technol.}} +@string{epl = {Europhys. Lett.}} +@string{farad = {Faraday Disc.}} +@string{foundphys = {Found. Phys.}} +@string{fp = {Fortschr. Phys.}} +@string{ijtp = {Int. J. Theor. Phys.}} +@string{imajam = {IMA J. Appl. Math.}} +@string{itas = {IEEE Trans. on Appl. Superc.}} +@string{jap = {J. Appl. Phys.}} +@string{jcmpp = {J. Comput. 
Phys.}} +@string{jcp = {J. Chem. Phys.}} +@string{jcpm = {J. Phys. Condens. Matter}} +@string{jcss = {J. Comput. System Sci.}} +@string{jctn = {J. Comput. Theor. Nanos.}} +@string{jlum = {J. Lumin.}} +@string{jmo = {J. Mod. Opt.}} +@string{jmp = {J. Math. Phys.}} +@string{jmr = {J. Magnet. Res.}} +@string{jmra = {J. Magnet. Res. A}} +@string{job = {J. Optics B}} +@string{josab = {J. Opt. Soc. Am. B}} +@string{jota = {J. Optim. Theor. Appl.}} +@string{jpa = {J. Phys. A}} +@string{jpb = {J. Phys. B}} +@string{jpc = {J. Phys. Chem.}} +@string{jpca = {J. Phys. Chem. A}} +@string{jpcm = {J. Phys.: Condens. Matter}} +@string{mc = {Math. Comput.}} +@string{nams = {Notices Amer. Math. Soc.}} +@string{nat = {Nature}} +@string{natcom = {Nature Commun.}} +@string{natnano = {Nature Nano.}} +@string{natphot = {Nature Photon.}} +@string{natphys = {Nature Phys.}} +@string{njp = {New J. Phys.}} +@string{oc = {Opt. Comm.}} +@string{os = {Opt. Spectr.}} +@string{physd = {Physica D}} +@string{physrep = {Phys. Rep.}} +@string{pl = {Phys. Lett.}} +@string{pla = {Phys. Lett. A}} +@string{plms = {Proc. Lond. Math. Soc.}} +@string{pnas = {Proc. Natl. Acad. Sci.}} +@string{pr = {Phys. Rev.}} +@string{pra = {Phys. Rev. A}} +@string{prb = {Phys. Rev. B}} +@string{prc = {Phys. Rev. C}} +@string{prd = {Phys. Rev. D}} +@string{pre = {Phys. Rev. E}} +@string{prl = {Phys. Rev. Lett.}} +@string{prsa = {Proc. R. Soc. A}} +@string{prx = {Phys. Rev. X}} +@string{ps = {Phys. Scripta}} +@string{ptrsa = {Phil. Trans. R. Soc. A}} +@string{qam = {Q. Appl. Math.}} +@string{qic = {Quantum Info. Comput.}} +@string{qip = {Quantum Inf. Process.}} +@string{qso = {Quantum Semiclass. Opt.}} +@string{qst = {Quantum Sci. Technol.}} +@string{rmp = {Rev. Mod. Phys.}} +@string{rpp = {Rep. Prog. Phys.}} +@string{rsi = {Rev. Sci. Instr.}} +@string{sci = {Science}} +@string{sciam = {Sci. Am.}} +@string{siamjc = {SIAM J. Comput.}} +@string{siamjsc = {SIAM J. Sci. 
Comput.}} +@string{siamrev = {SIAM Rev.}} +@string{sp = {Sig. Process.}} +@string{sr = {Sci. Rep.}} +@string{sst = {Supercond. Sci. Technol.}} +@string{zp = {Z. Phys.}} + + +@phdthesis{GoerzPhd2015, + Author = {Michael Goerz}, + School = {Universit{\"a}t Kassel}, + Title = {Optimizing Robust Quantum Gates in Open Quantum Systems}, + Year = {2015}, + url = {https://kobra.bibliothek.uni-kassel.de/handle/urn:nbn:de:hebis:34-2015052748381}, +} + + +@book{LambertODEBook, + Address = {New York, NY}, + Author = {Lambert, J. D.}, + Publisher = {John Wiley \& Sons}, + Title = {Numerical Methods for Ordinary Differential Systems: The Initial Value Problem}, + Year = {1991}} + + +@book{NumRecipesFortran, + Address = {New York, NY}, + Author = {Press, William H. and Teukolsky, Saul A. and Vetterling, William T. and Flannery, Brian P.}, + Publisher = {Cambridge University Press}, + Title = {Numerical Recipes in FORTRAN; The Art of Scientific Computing}, + Year = {1993}} + + +@article{Tal-EzerJCP84, + Author = {Tal-Ezer, H. and Kosloff, R.}, + Journal = jcp, + Number = {9}, + Pages = {3967}, + Title = {An Accurate and Efficient Scheme for Propagating the Time Dependent {Schrödinger} Equation}, + Volume = {81}, + Year = {1984}} + + + +@book{GilBook2007, + Author = {Gil, A. and Segura, J. 
and Temme, N.}, + Publisher = {Society for Industrial and Applied Mathematics}, + Title = {Numerical Methods for Special Functions}, + Year = {2007}} + + +@article{KosloffJCP88, + Author = {Kosloff, Ronnie}, + Journal = jcp, + Number = {8}, + Pages = {2087}, + Title = {Time-Dependent Quantum-Mechanical Methods for Molecular Dynamics}, + Volume = {92}, + Year = {1988}} + + +@book{TannorBook, + Address = {Sausalito, California}, + Author = {Tannor, David J.}, + Publisher = {University Science Books}, + Title = {Introduction to Quantum Mechanics: A Time-Dependent Perspective}, + Year = {2007}} + + +@article{NdongJCP09, + Author = {Ndong, Mamadou and Tal-Ezer, Hillel and Kosloff, Ronnie and Koch, Christiane P.}, + Journal = jcp, + Number = {12}, + Pages = {124108}, + Title = {A Chebychev propagator for inhomogeneous {Schrödinger} equations}, + Volume = {130}, + Year = {2009}} + + +@article{AshkenaziJCP95, + Author = {Ashkenazi, Guy and Kosloff, Ronnie and Ruhman, Sanford and Tal-Ezer, Hillel}, + Journal = jcp, + Number = {23}, + Pages = {10005}, + Title = {Newtonian propagation methods applied to the photodissociation dynamics of {I₃₋}}, + Volume = {103}, + Year = {1995}} + + +@article{KosloffARPC94, + Author = {Kosloff, R}, + Journal = arpc, + Number = {1}, + Pages = {145}, + Title = {Propagation Methods for Quantum Molecular Dynamics}, + Volume = {45}, + Year = {1994}} + + +@article{BermanJPA92, + Author = {Berman, M and Kosloff, R and Tal-Ezer, H}, + Journal = jpa, + Number = {5}, + Pages = {1283}, + Title = {Solution of the time-dependent {Liouville-von Neumann} equation: dissipative evolution}, + Volume = {25}, + Year = {1992}} + + +@article{HuisingaJCP99, + Author = {Huisinga, Wilhelm and Pesce, Lorenzo and Kosloff, Ronnie and Saalfrank, Peter}, + Journal = jcp, + Number = {12}, + Pages = {5538}, + Title = {{Faber} and {Newton} polynomial integrators for open-system density matrix propagation}, + Volume = {110}, + Year = {1999}} + + +@article{Tal-EzerSJSC2007, + 
Author = {Tal-Ezer, H.}, + Journal = siamjsc, + Number = {6}, + Pages = {2426}, + Title = {On Restart and Error Estimation for {Krylov} Approximation of w=f(A)v}, + Volume = {29}, + Year = {2007}} + + +@article{ReichelBIT1990, + Author = {Reichel, Lothar}, + Journal = {BIT}, + Number = {2}, + Pages = {332}, + Title = {{Newton} interpolation at {Leja} points}, + Volume = {30}, + Year = {1990}} diff --git a/docs/rtd_environment.yml b/docs/rtd_environment.yml index 56cbc18..6b16cb6 100644 --- a/docs/rtd_environment.yml +++ b/docs/rtd_environment.yml @@ -4,7 +4,6 @@ channels: - conda-forge dependencies: - python=3.6 - - qutip - pip: - better-apidoc>=0.2.0 - sphinx_autodoc_typehints diff --git a/setup.py b/setup.py index 0c3d66b..689ecf0 100644 --- a/setup.py +++ b/setup.py @@ -29,11 +29,11 @@ def get_version(filename): dev_requirements = [ 'coverage', 'pytest', 'pytest-cov', 'pytest-xdist', 'twine', 'pep8', 'flake8', 'wheel', 'sphinx', 'sphinx-autobuild', 'sphinx_rtd_theme', - 'sphinx-autodoc-typehints', 'gitpython', ] + 'sphinx-autodoc-typehints', 'gitpython', 'sphinxcontrib-bibtex', + 'jupyter', 'nbval', 'nbsphinx', 'watermark', 'matplotlib', +] dev_requirements.append('better-apidoc') -dev_requirements.extend([ - 'jupyter', 'nbval', 'nbsphinx', 'watermark', 'matplotlib']) version = get_version('./src/newtonprop/__init__.py') diff --git a/src/newtonprop/propagator.py b/src/newtonprop/propagator.py index de02379..ef568ab 100644 --- a/src/newtonprop/propagator.py +++ b/src/newtonprop/propagator.py @@ -2,7 +2,7 @@ import logging import numpy as np -__all__ = ['step'] +__all__ = ["step"] def _arnoldi(A, dt, v0, m_max, inner=np.vdot, norm=np.linalg.norm): @@ -27,7 +27,9 @@ def _arnoldi(A, dt, v0, m_max, inner=np.vdot, norm=np.linalg.norm): m = m_max # Hessenberg matrix (at maximum size) - Hess = np.matrix(np.zeros(shape=(m_max+1, m_max+1), dtype=np.complex128)) + Hess = np.matrix( + np.zeros(shape=(m_max + 1, m_max + 1), dtype=np.complex128) + ) # Eigenvalues of all Hess 
Ritz = [] @@ -35,21 +37,22 @@ def _arnoldi(A, dt, v0, m_max, inner=np.vdot, norm=np.linalg.norm): arnoldi_vecs = [] beta = norm(v0) - if (abs(beta-1.0) > 1.0e-10): + if abs(beta - 1.0) > 1.0e-10: print("beta = ", beta) raise AssertionError( - "v0 must have norm 1.0. Mismatch between `inner` and `norm`?") + "v0 must have norm 1.0. Mismatch between `inner` and `norm`?" + ) v = v0 / beta arnoldi_vecs.append(v) for j in range(m): v = A(v) # v_{j+1} for i, v_i in enumerate(arnoldi_vecs): Hess[i, j] = dt * inner(v_i, v) - v = v - (Hess[i, j]/dt) * v_i + v = v - (Hess[i, j] / dt) * v_i # At this point, we have finished the (j+1) x (j+1) Hessenberg matrix - Ritz.extend(np.linalg.eigvals(Hess[:j+1, :j+1])) + Ritz.extend(np.linalg.eigvals(Hess[: j + 1, : j + 1])) h_next = norm(v) - Hess[j+1, j] = h_next * dt + Hess[j + 1, j] = h_next * dt if h_next <= 1.0e-14: # abort early at convergence m = j break @@ -57,7 +60,7 @@ def _arnoldi(A, dt, v0, m_max, inner=np.vdot, norm=np.linalg.norm): arnoldi_vecs.append(v) # At this point, arnoldi_vecs contains m+1 elements Ritz = np.array(Ritz, dtype=np.complex128) - return arnoldi_vecs, Hess[:m+1, :m+1], Ritz, m + return arnoldi_vecs, Hess[: m + 1, : m + 1], Ritz, m def _normalize_points(z): @@ -70,7 +73,7 @@ def _normalize_points(z): # we need to enlarge the radius a little bit to account for points that # will be added in later iterations r *= 1.2 # arbitary factor - assert(r > 0.0), "Radius is zero" + assert r > 0.0, "Radius is zero" return r @@ -80,7 +83,7 @@ def _extend_newton_coeffs(func, old_a, new_leja, center, radius): """ n_old = len(old_a) m = len(new_leja) - n_old - a = np.zeros(n_old+m, dtype=np.complex128) + a = np.zeros(n_old + m, dtype=np.complex128) a[:n_old] = old_a n0 = n_old @@ -88,16 +91,16 @@ def _extend_newton_coeffs(func, old_a, new_leja, center, radius): a[0] = func(new_leja[0]) n0 = 1 - for k in range(n0, n_old+m): + for k in range(n0, n_old + m): d = 1.0 pn = 0.0 for n in range(1, k): # 1..k-1 - zd = new_leja[k] 
- new_leja[n-1] + zd = new_leja[k] - new_leja[n - 1] d *= zd / radius pn += a[n] * d - zd = new_leja[k] - new_leja[k-1] + zd = new_leja[k] - new_leja[k - 1] d *= zd / radius - assert(abs(d) > 1.0e-200), "Divided differences too small" + assert abs(d) > 1.0e-200, "Divided differences too small" a[k] = (func(new_leja[k]) - a[0] - pn) / d return a @@ -113,8 +116,8 @@ def _extend_leja(old_leja, new_points, n_use): if n_old == 0: # At the very beginning, start with the point that has largest absolute # value - for i in range(len(new_points)-1): # 0 .. n_old - 2 - if (abs(new_points[i]) > abs(new_points[-1])): + for i in range(len(new_points) - 1): # 0 .. n_old - 2 + if abs(new_points[i]) > abs(new_points[-1]): temp = new_points[i] new_points[i] = new_points[-1] new_points[-1] = temp @@ -122,32 +125,41 @@ def _extend_leja(old_leja, new_points, n_use): i_add_start = 1 # find the best point for new_leja[n_old+n] n_added = i_add_start - ex = 1.0/(n_old + n_use) + ex = 1.0 / (n_old + n_use) for i_add in range(i_add_start, n_use): p_max = 0.0 i_max = 0 # the new leja are defined with index 0 .. (n_old-1)+n # the new candidates are defined with index 0 .. 
len(new_points)-1+n - for i in range(len(new_points)-i_add): # trial points (candidates) + for i in range(len(new_points) - i_add): # trial points (candidates) p = 1.0 for j in range(n_old + i_add): # existing leja points - p *= np.abs(new_points[i] - new_leja[j])**ex + p *= np.abs(new_points[i] - new_leja[j]) ** ex # at this point p is the divided difference denominator for the # candidate with index i if p > p_max: p_max = p i_max = i # XXX if p_max below limit: abort - new_leja[n_old+i_add] = new_points[i_max] + new_leja[n_old + i_add] = new_points[i_max] n_added += 1 # remove the used point by moving in the last point - new_points[i_max] = new_points[len(new_points)-1-i_add] + new_points[i_max] = new_points[len(new_points) - 1 - i_add] return new_leja, n_added def step( - A, v, dt, func=None, m_max=10, maxrestart=100, tol=1e-12, - inner=None, norm=None, zero=None): + A, + v, + dt, + func=None, + m_max=10, + maxrestart=100, + tol=1e-12, + inner=None, + norm=None, + zero=None, +): r"""Evaluate $f(A\,dt)\,v$, for example $e^{-i\,A\,dt}\,v$ for the propagation of a quantum state. @@ -213,8 +225,11 @@ def step( Mathematically, `state` must be an element a Hilbert space (a "complete inner product space"). This includes density matrices, which can be interpreted as elements of a Hilbert space provided one chooses an - appropriate norm and inner product. The norm *must* be the one - induced by the inner product, + appropriate norm and inner product. + + .. Warning:: + + The norm *must* be the one induced by the inner product, .. math:: @@ -226,9 +241,12 @@ def step( product. If the operator norm is used, density matrices are not elements of a Hilbert space, but of a C* algebra, which would not allow for the evaluation of the propagator. 
+ + A mismatch between `norm` and `inner` leads to subtle errors that will + not be obvious (e.g., a substantial lack of precision) """ - logger = logging.getLogger('newton') + logger = logging.getLogger("newton") if inner is None: inner = np.vdot @@ -241,13 +259,13 @@ def step( try: N = np.prod(v.shape) - assert(m_max <= N), "m_max must be smaller than the system dimension" + assert m_max <= N, "m_max must be smaller than the system dimension" except AttributeError: pass - w = zero(v) # result vector - Z = np.zeros(0, dtype=np.complex128) # Leja points - a = np.zeros(0, dtype=np.complex128) # Newton coeffs + w = zero(v) # result vector + Z = np.zeros(0, dtype=np.complex128) # Leja points + a = np.zeros(0, dtype=np.complex128) # Newton coeffs beta = norm(v) v = v / beta @@ -255,10 +273,15 @@ def step( for s in range(maxrestart): arnoldi_vecs, Hess, Ritz, m = _arnoldi( - A, dt, v, m_max, inner=inner, norm=norm) + A, dt, v, m_max, inner=inner, norm=norm + ) if m < m_max: - logger.warn("Arnoldi only returned order %d instead of the " - "requested %d", m, m_max) + logger.warn( + "Arnoldi only returned order %d instead of the " + "requested %d", + m, + m_max, + ) if m == 0 and s == 0: # The input state appears to be an eigenstate eig_val = beta * Hess[0, 0] @@ -270,20 +293,20 @@ def step( if s == 0: radius = _normalize_points(Ritz) center = 0.0 - assert(radius > 0.0), "Radius is zero" + assert radius > 0.0, "Radius is zero" # get Leja points (i.e. 
Ritz points in the proper order) n_s = len(Z) Z, m = _extend_leja(Z, Ritz, m) # Z now contains m new Leja points - assert(m > 0), "No new Leja points" + assert m > 0, "No new Leja points" a = _extend_newton_coeffs(func, a, Z, center, radius) - R = np.matrix(np.zeros(shape=(m+1, 1), dtype=np.complex128)) + R = np.matrix(np.zeros(shape=(m + 1, 1), dtype=np.complex128)) R[0, 0] = beta P = a[n_s] * R for k in range(1, m): # 1..m-1 - R = (np.dot(Hess, R) - Z[n_s+k-1] * R) / radius - P += a[n_s+k] * R + R = (np.dot(Hess, R) - Z[n_s + k - 1] * R) / radius + P += a[n_s + k] * R wp = zero(v) for i in range(m): # 0 .. m-1 @@ -292,29 +315,30 @@ def step( w += wp # starting vector for next iteration - R = (np.dot(Hess, R) - Z[n_s+m-1] * R) / radius + R = (np.dot(Hess, R) - Z[n_s + m - 1] * R) / radius beta = np.linalg.norm(R) R /= beta # beta would be the norm of v, with the above normalization, v will now # be normalized v = zero(v) - for i in range(m+1): # 0 .. m + for i in range(m + 1): # 0 .. m v += R[i, 0] * arnoldi_vecs[i] - if (beta*abs(a[-1])/(1+norm(w)) < tol): + if beta * abs(a[-1]) / (1 + norm(w)) < tol: if logger.isEnabledFor(logging.DEBUG): # pragma: nocover logger.debug("Converged at restart %s", s) logger.debug("norm of wp : %s", norm(wp)) logger.debug("norm of w : %s", norm(w)) logger.debug("beta : %s", beta) logger.debug( - "|R*a[-1]|/|w| : %s", np.linalg.norm(R) * a[-1] / norm(w)) + "|R*a[-1]|/|w| : %s", np.linalg.norm(R) * a[-1] / norm(w) + ) logger.debug("max Leja radius: %s", np.max(np.abs(Z))) break try: - assert(not np.isnan(np.sum(v))), "v contains NaN" - assert(not np.isnan(np.sum(w))), "w contains NaN" + assert not np.isnan(np.sum(v)), "v contains NaN" + assert not np.isnan(np.sum(w)), "w contains NaN" except (AttributeError, TypeError): pass