diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml new file mode 100644 index 00000000..3494a9f1 --- /dev/null +++ b/.JuliaFormatter.toml @@ -0,0 +1,3 @@ +style = "sciml" +format_markdown = true +format_docstrings = true diff --git a/.buildkite/.gitignore b/.buildkite/.gitignore new file mode 100644 index 00000000..46de5d5e --- /dev/null +++ b/.buildkite/.gitignore @@ -0,0 +1 @@ +ssh_deploy.key diff --git a/.buildkite/0_webui.yml b/.buildkite/0_webui.yml new file mode 100644 index 00000000..af44a7d7 --- /dev/null +++ b/.buildkite/0_webui.yml @@ -0,0 +1,28 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":unlock: Launch tutorials build if hash check successful" + branches: "!gh-pages" + plugins: + - staticfloat/cryptic#v2: + signed_pipelines: + - pipeline: .buildkite/launch_tutorials.yml + signature_file: .buildkite/launch_tutorials.yml.signature + inputs: + - .buildkite/run_tutorial.yml + - .buildkite/publish_tutorials_output.sh + allow_hash_override: true + command: "true" + + - label: ":runner: Dynamically launch test suite" + plugins: + - staticfloat/forerunner: + # This will create one job overall, throwing all path information away + watch: + - "src/**/*.jl" + - "src/*.jl" + - "**/*.toml" + target: .buildkite/test_sciml.yml + target_type: simple diff --git a/.buildkite/cryptic_repo_keys/.gitignore b/.buildkite/cryptic_repo_keys/.gitignore new file mode 100644 index 00000000..f84d0896 --- /dev/null +++ b/.buildkite/cryptic_repo_keys/.gitignore @@ -0,0 +1,7 @@ + +# Ignore the unencrypted repo_key +repo_key + +# Ignore any agent keys (public or private) we have stored +agent_key* + diff --git a/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 b/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 new file mode 100644 index 00000000..6065ce29 Binary files /dev/null and b/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 differ diff --git a/.buildkite/launch_test_sciml.yml b/.buildkite/launch_test_sciml.yml new file mode 100644 index 
00000000..85079b4a --- /dev/null +++ b/.buildkite/launch_test_sciml.yml @@ -0,0 +1,16 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":runner: Dynamically launch test_sciml" + branches: "!gh-pages" + plugins: + - staticfloat/forerunner: + # This will create one job overall, throwing all path information away + watch: + - "src/**/*.jl" + - "src/*.jl" + - "**/*.toml" + target: .buildkite/test_sciml.yml + target_type: simple diff --git a/.buildkite/launch_tutorials.yml b/.buildkite/launch_tutorials.yml new file mode 100644 index 00000000..81eebe74 --- /dev/null +++ b/.buildkite/launch_tutorials.yml @@ -0,0 +1,19 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":runner: Dynamically launch run_tutorial.yml" + branches: "!gh-pages" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} + depends_on: + plugins: + - staticfloat/forerunner: + # This will create one job per project + watch: + - tutorials/**/*.jmd + - tutorials/**/*.toml + path_processor: .buildkite/path_processors/project-coalescing + target: .buildkite/run_tutorial.yml + target_type: template \ No newline at end of file diff --git a/.buildkite/launch_tutorials.yml.signature b/.buildkite/launch_tutorials.yml.signature new file mode 100644 index 00000000..8f286f74 --- /dev/null +++ b/.buildkite/launch_tutorials.yml.signature @@ -0,0 +1,2 @@ +Salted__ +HX+D;HN2qhb=c$J0~0~dх3A܉YrB{?󒟭z P \ No newline at end of file diff --git a/.buildkite/path_processors/project-coalescing b/.buildkite/path_processors/project-coalescing new file mode 100755 index 00000000..26a0f5d8 --- /dev/null +++ b/.buildkite/path_processors/project-coalescing @@ -0,0 +1,62 @@ +#!/bin/bash + +# When a `.jmd` file is modified, it gets rewritten by itself; but when a `.toml` file +# (such as a `Project.toml` or a `Manifest.toml`) gets modified, we rebuild the entire +# directory. 
To avoid double-building, we coalesce all changes here, by converting +# changed files that end in `.toml` to their directory, then dropping all other files +# within that folder. + +# This will hold all files that need to be rebuilt, keyed by path and pointing to their +# containing project +declare -A FILES + +# This will hold all projects that need to be rebuilt, and will allow us to suppress +# values from FILES_TO_RUN +declare -A PROJECTS + +# Helper function to find the directory that contains the `Project.toml` for this file +function find_project() { + d="${1}" + # We define a basecase, that the path must begin with `tutorials` and is not allowed + # to move outside of that subtree. + while [[ "${d}" =~ tutorials/.* ]]; do + if [[ -f "${d}/Project.toml" ]]; then + echo "${d}" + return + fi + d="$(dirname "${d}")" + done +} + +# For each file, find its project, then if its a `.jmd` file, we add it to `FILES` +# If it's a `.toml` file, we add it to `PROJECTS`. +for f in "$@"; do + proj=$(find_project "${f}") + if [[ -z "${proj}" ]]; then + buildkite-agent annotate "Unable to find project for ${f}" --style "error" + continue + fi + + if [[ "${f}" == *.jmd ]]; then + FILES["${f}"]="${proj}" + elif [[ "${f}" == *.toml ]]; then + PROJECTS["${proj}"]=1 + else + buildkite-agent annotate "Unknown weave type for file ${f}" --style "error" + fi +done + +# We're going to emit the project directories first: +BUILD_TARGETS="${!PROJECTS[@]}" + +# But we're also going to emit any single files whose projects are _not_ contained +# in the projects we're already building +for f in "${!FILES[@]}"; do + proj=${FILES[$f]} + if ! 
[ ${PROJECTS[$proj]+x} ]; then + BUILD_TARGETS="${BUILD_TARGETS} ${f}" + fi +done + +# Output the build targets +echo "${BUILD_TARGETS}" diff --git a/.buildkite/publish_tutorials_output.sh b/.buildkite/publish_tutorials_output.sh new file mode 100755 index 00000000..c4b0535f --- /dev/null +++ b/.buildkite/publish_tutorials_output.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Ensure that our git wants to talk to github without prompting +mkdir -p ~/.ssh +ssh-keyscan github.com >> ~/.ssh/known_hosts +git config --global user.email "buildkite@julialang.org" +git config --global user.name "SciML Tutorials CI" + +# Clone SciMLTutorialsOutput to temporary directory +temp_dir=$(mktemp -d) +git -C "${temp_dir}" clone git@github.com:SciML/SciMLTutorialsOutput . + +# Copy our output artifacts into it: +for d in docs html notebook pdf script markdown; do + cp -vRa "${d}/" "${temp_dir}" +done +cp -va *.md *.bib "${temp_dir}" + +# Commit the result up to output +set -e +git -C "${temp_dir}" add . +git -C "${temp_dir}" commit -m "Automatic build\nPublished by build of: ${BUILDKITE_REPO%.git}/commit/${BUILDKITE_COMMIT}" +git -C "${temp_dir}" push + +rm -rf "${temp_dir}" diff --git a/.buildkite/run_tutorial.yml b/.buildkite/run_tutorial.yml new file mode 100644 index 00000000..1bf05b99 --- /dev/null +++ b/.buildkite/run_tutorial.yml @@ -0,0 +1,94 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + arch: "x86_64" + +# This is a pipeline that weaves a tutorial, then uploads the resultant +# .PDF and other reports as (buildkite, not Julia) artifacts. The `coppermind` +# configuration memoizes the result, so that identical inputs don't get +# weaved multiple times. 
+steps: + - label: ":hammer: {PATH}" + key: "tutorial-{SANITIZED_PATH}" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} + plugins: + - staticfloat/cryptic#v2: + variables: + - BUILDKITE_S3_ACCESS_KEY_ID="U2FsdGVkX1/ckce1vUF8A17rHLxcAlAou4aokaeS8YL6omsA1Vq1IDZko5cL1Z+t" + - BUILDKITE_S3_SECRET_ACCESS_KEY="U2FsdGVkX1+SPF81nkK7KQ64DsafSl0qq2iG7BsQs1xlTYEtZV3MqQl3l/NWaiocaEywZZFbAB5zpnKPD0xHTQ==" + - BUILDKITE_S3_DEFAULT_REGION="U2FsdGVkX1/cORlxhXcxhja2JkqC0f8RmaGYxvGBbEg=" + - JuliaCI/julia#v1: + version: 1.8 + - staticfloat/sandbox: + rootfs_url: "https://jc-rootfs-images.s3.amazonaws.com/aws_uploader-2021-11-12.x86_64.tar.gz" + rootfs_treehash: "986217e5b36efd3b3b91ed90df8e36d628cf543f" + workspaces: + # Include the julia we just downloaded + - "/cache/julia-buildkite-plugin:/cache/julia-buildkite-plugin" + - staticfloat/coppermind#v1: + inputs: + # We are sensitive to the actual tutorial changing + - {PATH} + # We are sensitive to the source code of this package changing + - src/**/*.jl + # We are sensitive to our overall dependencies changing + - ./*.toml + outputs: + #- html/**/*.html + - markdown/**/figures/*.png + - markdown/**/*.md + - notebook/**/*.ipynb + - pdf/**/*.pdf + - script/**/*.jl + s3_prefix: s3://julialang-buildkite-artifacts/scimltutorials + timeout_in_minutes: 1000 + commands: | + # Instantiate, to install the overall project dependencies + echo "--- Instantiate" + julia --project=. -e 'using Pkg; Pkg.instantiate(); Pkg.build()' + + # Run tutorial + echo "+++ Run tutorial for {PATH}" + julia --project=. 
weave_tutorials.jl "{PATH}" + + - label: ":rocket: Publish {PATH}" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} + plugins: + - staticfloat/cryptic#v2: + variables: + - BUILDKITE_S3_ACCESS_KEY_ID="U2FsdGVkX1/ckce1vUF8A17rHLxcAlAou4aokaeS8YL6omsA1Vq1IDZko5cL1Z+t" + - BUILDKITE_S3_SECRET_ACCESS_KEY="U2FsdGVkX1+SPF81nkK7KQ64DsafSl0qq2iG7BsQs1xlTYEtZV3MqQl3l/NWaiocaEywZZFbAB5zpnKPD0xHTQ==" + - BUILDKITE_S3_DEFAULT_REGION="U2FsdGVkX1/cORlxhXcxhja2JkqC0f8RmaGYxvGBbEg=" + files: + - .buildkite/ssh_deploy.key + - JuliaCI/julia#v1: + version: 1.8 + - staticfloat/sandbox: + rootfs_url: "https://jc-rootfs-images.s3.amazonaws.com/aws_uploader-2021-11-12.x86_64.tar.gz" + rootfs_treehash: "986217e5b36efd3b3b91ed90df8e36d628cf543f" + workspaces: + # Include the julia we just downloaded + - "/cache/julia-buildkite-plugin:/cache/julia-buildkite-plugin" + # Use coppermind to download the tutorial results that were calculated in the + # weaving job above. Note we still list `outputs` here, since we have the + # option to extract only a subset of them here. + - staticfloat/coppermind#v1: + input_from: "tutorial-{SANITIZED_PATH}" + outputs: + #- html/**/*.html + - markdown/**/figures/*.png + - markdown/**/*.md + - notebook/**/*.ipynb + - pdf/**/*.pdf + - script/**/*.jl + s3_prefix: s3://julialang-buildkite-artifacts/scimltutorials + - staticfloat/ssh-agent: + keyfiles: + - .buildkite/ssh_deploy.key + commands: .buildkite/publish_tutorials_output.sh + # Don't run this unless we're on the master branch, and not until the actual weave + # command has had a chance to run. 
+ depends_on: "tutorial-{SANITIZED_PATH}" + branches: "master" diff --git a/.buildkite/ssh_deploy.key.encrypted b/.buildkite/ssh_deploy.key.encrypted new file mode 100644 index 00000000..9e0edc3a Binary files /dev/null and b/.buildkite/ssh_deploy.key.encrypted differ diff --git a/.buildkite/test_sciml.yml b/.buildkite/test_sciml.yml new file mode 100644 index 00000000..ca906117 --- /dev/null +++ b/.buildkite/test_sciml.yml @@ -0,0 +1,24 @@ +agents: + queue: "juliaecosystem" + arch: "x86_64" + +steps: + - label: ":julia: Run tests on 1.8" + plugins: + - JuliaCI/julia#v1: + version: 1.8 + - JuliaCI/julia-test#v1: + timeout_in_minutes: 20 + artifact_paths: + # Upload .html + - "html/Testing/*.html" + # Upload markdown + - "markdown/Testing/*.md" + # Upload notebook + - "notebook/Testing/*.ipynb" + # Upload .pdf files + - "pdf/Testing/*.pdf" + # Upload Julia script + - "script/Testing/*.jl" + agents: + queue: "juliaecosystem" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..700707ce --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml new file mode 100644 index 00000000..c3e22990 --- /dev/null +++ b/.github/workflows/CompatHelper.yml @@ -0,0 +1,26 @@ +name: CompatHelper + +on: + schedule: + - cron: '00 00 * * *' + issues: + types: [opened, reopened] + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Pkg.add("CompatHelper") + run: julia -e 'using Pkg; Pkg.add("CompatHelper")' + - name: CompatHelper.main() + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + julia -e ' + using CompatHelper + dirs = filter( + d -> isdir(d) && 
isfile(joinpath(d, "Project.toml")), + readdir("tutorials"; join=true), + ) + CompatHelper.main(; subdirs=["", dirs...])' diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml new file mode 100644 index 00000000..f49313b6 --- /dev/null +++ b/.github/workflows/TagBot.yml @@ -0,0 +1,15 @@ +name: TagBot +on: + issue_comment: + types: + - created + workflow_dispatch: +jobs: + TagBot: + if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot' + runs-on: ubuntu-latest + steps: + - uses: JuliaRegistries/TagBot@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + ssh: ${{ secrets.DOCUMENTER_KEY }} diff --git a/.gitignore b/.gitignore index d96fe5b9..06b4ac54 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,18 @@ -*.jl.cov -*.jl.*.cov -*.jl.mem .ipynb_checkpoints +*/.ipynb_checkpoints/* *.tmp *.aux *.log *.out *.tex +tmp*/ +gks.svg +/*/*/jl_*/ +/Manifest.toml + +# We're going to store these in a separate repository now +html/ +script/ +pdf/ +notebook/ +markdown/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 00000000..d33fe3aa --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,8 @@ +include: https://raw.githubusercontent.com/SciML/RebuildAction/master/rebuild.yml +variables: + CONTENT_DIR: tutorials + EXCLUDE: exercises/02-workshop_solutions, models/06-pendulum_bayesian_inference + GITHUB_REPOSITORY: SciML/SciMLTutorials.jl + GPU_TAG: nvidia-benchmark + NEEDS_GPU: advanced/01-beeler_reuter + TAGS: nvidia-benchmark diff --git a/CITATION.bib b/CITATION.bib new file mode 100644 index 00000000..c5ed29ff --- /dev/null +++ b/CITATION.bib @@ -0,0 +1,13 @@ +@article{DifferentialEquations.jl-2017, + author = {Rackauckas, Christopher and Nie, Qing}, + doi = {10.5334/jors.151}, + journal = {The Journal of Open Research Software}, + keywords = {Applied Mathematics}, + note = {Exported from https://app.dimensions.ai on 2019/05/05}, + number = {1}, + pages = {}, + title = {DifferentialEquations.jl – A Performant and Feature-Rich 
Ecosystem for Solving Differential Equations in Julia}, + url = {https://app.dimensions.ai/details/publication/pub.1085583166 and http://openresearchsoftware.metajnl.com/articles/10.5334/jors.151/galley/245/download/}, + volume = {5}, + year = {2017} +} diff --git a/LICENSE.md b/LICENSE.md index 5946f9e2..6ec510bc 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -The DiffEqTutorials.jl package is licensed under the MIT "Expat" License: +The SciMLTutorials.jl package is licensed under the MIT "Expat" License: > Copyright (c) 2016: ChrisRackauckas. > @@ -19,4 +19,3 @@ The DiffEqTutorials.jl package is licensed under the MIT "Expat" License: > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE > SOFTWARE. -> diff --git a/Manifest.toml b/Manifest.toml deleted file mode 100644 index 564dae1b..00000000 --- a/Manifest.toml +++ /dev/null @@ -1,902 +0,0 @@ -# This file is machine-generated - editing it directly is not advised - -[[AbstractFFTs]] -deps = ["Compat", "LinearAlgebra"] -git-tree-sha1 = "8d59c3b1463b5e0ad05a3698167f85fac90e184d" -uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" -version = "0.3.2" - -[[AbstractTrees]] -deps = ["Markdown", "Test"] -git-tree-sha1 = "6621d9645702c1c4e6970cc6a3eae440c768000b" -uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.2.1" - -[[Adapt]] -deps = ["LinearAlgebra", "Test"] -git-tree-sha1 = "53d8fec4f662088c1202530e338a11a919407f3b" -uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "0.4.2" - -[[ArbNumerics]] -deps = ["BinDeps", "GenericLinearAlgebra", "Libdl", "LinearAlgebra", "Random", "SpecialFunctions", "Test"] -git-tree-sha1 = "121078c5082c0fa8ec38ccc4eb8f5c472e73fc05" -uuid = "7e558dbc-694d-5a72-987c-6f4ebed21442" -version = "0.3.7" - -[[Arpack]] -deps = ["BinaryProvider", "Libdl", "LinearAlgebra", "Random", "SparseArrays", "Test"] -git-tree-sha1 = "1ce1ce9984683f0b6a587d5bdbc688ecb480096f" -uuid = 
"7d9fca2a-8960-54d3-9f78-7d1dccf2cb97" -version = "0.3.0" - -[[ArrayInterface]] -deps = ["Requires", "Test"] -git-tree-sha1 = "6a1a371393e56f5e8d5657fe4da4b11aea0bfbae" -uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "0.1.1" - -[[BandedMatrices]] -deps = ["FillArrays", "LazyArrays", "LinearAlgebra", "Random", "SparseArrays", "Test"] -git-tree-sha1 = "ac5b244e729922c61f75c2e4478e13e8949b1f56" -uuid = "aae01518-5342-5314-be14-df237901396f" -version = "0.8.2" - -[[Base64]] -uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" - -[[BenchmarkTools]] -deps = ["JSON", "Printf", "Statistics", "Test"] -git-tree-sha1 = "5d1dd8577643ba9014574cd40d9c028cd5e4b85a" -uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -version = "0.4.2" - -[[BinDeps]] -deps = ["Compat", "Libdl", "SHA", "URIParser"] -git-tree-sha1 = "12093ca6cdd0ee547c39b1870e0c9c3f154d9ca9" -uuid = "9e28174c-4ba2-5203-b857-d8d62c4213ee" -version = "0.8.10" - -[[BinaryProvider]] -deps = ["Libdl", "Pkg", "SHA", "Test"] -git-tree-sha1 = "055eb2690182ebc31087859c3dd8598371d3ef9e" -uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232" -version = "0.5.3" - -[[BoundaryValueDiffEq]] -deps = ["BandedMatrices", "DiffEqBase", "DiffEqDiffTools", "ForwardDiff", "LinearAlgebra", "NLsolve", "Reexport", "SparseArrays", "Test"] -git-tree-sha1 = "0f4a4f86ae63671efd9b41de9dea8ad993f2daad" -uuid = "764a87c0-6b3e-53db-9096-fe964310641d" -version = "2.2.3" - -[[CUDAapi]] -deps = ["Libdl", "Logging", "Test"] -git-tree-sha1 = "e1f551ad1c03b3fa2a966794ead05772bff4b064" -uuid = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3" -version = "0.6.0" - -[[CUDAdrv]] -deps = ["CUDAapi", "Libdl", "Printf", "Test"] -git-tree-sha1 = "dfe527ba231b6b699f879d1d384c1d39b49fc005" -uuid = "c5f51814-7f29-56b8-a69c-e4d8f6be1fde" -version = "1.0.1" - -[[CUDAnative]] -deps = ["Adapt", "CUDAapi", "CUDAdrv", "InteractiveUtils", "LLVM", "Libdl", "Pkg", "Printf", "Statistics", "Test"] -git-tree-sha1 = "92e3ec4f4458e43cc17be4388b68690dbef24f31" -uuid = 
"be33ccc6-a3ff-5ff2-a52e-74243cff1e17" -version = "1.0.1" - -[[Calculus]] -deps = ["Compat"] -git-tree-sha1 = "f60954495a7afcee4136f78d1d60350abd37a409" -uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9" -version = "0.4.1" - -[[ChunkedArrays]] -deps = ["EllipsisNotation"] -git-tree-sha1 = "4f2ed36578a061c2c765b280b143358589cd7bd0" -uuid = "8bab3169-4815-5aad-9f88-5df82062e999" -version = "0.1.1" - -[[CodecZlib]] -deps = ["BinaryProvider", "Libdl", "Test", "TranscodingStreams"] -git-tree-sha1 = "36bbf5374c661054d41410dc53ff752972583b9b" -uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.5.2" - -[[Codecs]] -deps = ["Test"] -git-tree-sha1 = "70885e5e038cba1c4c17a84ad6c40756e10a4fb5" -uuid = "19ecbf4d-ef7c-5e4b-b54a-0a0ff23c5aed" -version = "0.5.0" - -[[ColorTypes]] -deps = ["FixedPointNumbers", "Random", "Test"] -git-tree-sha1 = "f73b0e10f2a5756de7019818a41654686da06b09" -uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" -version = "0.7.5" - -[[Colors]] -deps = ["ColorTypes", "FixedPointNumbers", "InteractiveUtils", "Printf", "Reexport", "Test"] -git-tree-sha1 = "9f0a0210450acb91c730b730a994f8eef1d3d543" -uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" -version = "0.9.5" - -[[CommonSubexpressions]] -deps = ["Test"] -git-tree-sha1 = "efdaf19ab11c7889334ca247ff4c9f7c322817b0" -uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" -version = "0.2.0" - -[[Compat]] -deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] -git-tree-sha1 = "195a3ffcb8b0762684b6821de18f83a16455c6ea" -uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "2.0.0" - -[[Conda]] -deps = ["Compat", "JSON", "VersionParsing"] -git-tree-sha1 = "b625d802587c2150c279a40a646fba63f9bd8187" -uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" -version = "1.2.0" - -[[Contour]] -deps = ["LinearAlgebra", 
"StaticArrays", "Test"] -git-tree-sha1 = "b974e164358fea753ef853ce7bad97afec15bb80" -uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" -version = "0.5.1" - -[[CuArrays]] -deps = ["AbstractFFTs", "Adapt", "CUDAapi", "CUDAdrv", "CUDAnative", "DiffRules", "ForwardDiff", "GPUArrays", "LinearAlgebra", "MacroTools", "NNlib", "Pkg", "Printf", "Random", "SparseArrays", "Test"] -git-tree-sha1 = "c1cd8792ca783987fcba2ed0d6b3b58176e6b13e" -uuid = "3a865a2d-5b23-5a0f-bc46-62713ec82fae" -version = "0.9.1" - -[[DataStructures]] -deps = ["InteractiveUtils", "OrderedCollections", "Random", "Serialization", "Test"] -git-tree-sha1 = "ca971f03e146cf144a9e2f2ce59674f5bf0e8038" -uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.15.0" - -[[Dates]] -deps = ["Printf"] -uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" - -[[DecFP]] -deps = ["BinaryProvider", "Compat", "Libdl", "SpecialFunctions"] -git-tree-sha1 = "26512f6d65cab2892bec756cd7c0efff3481113d" -uuid = "55939f99-70c6-5e9b-8bb0-5071ed7d61fd" -version = "0.4.8" - -[[Decimals]] -deps = ["Test"] -git-tree-sha1 = "b374d464d64470e743c5f9172c57352e1a9404ac" -uuid = "abce61dc-4473-55a0-ba07-351d65e31d42" -version = "0.4.0" - -[[DelayDiffEq]] -deps = ["DataStructures", "DiffEqBase", "DiffEqDiffTools", "ForwardDiff", "MuladdMacro", "NLSolversBase", "OrdinaryDiffEq", "RecursiveArrayTools", "Reexport", "Roots", "Test"] -git-tree-sha1 = "8573109883782a75c0d8574150a1beddc420ff66" -uuid = "bcd4f6db-9728-5f36-b5f7-82caef46ccdb" -version = "5.2.0" - -[[DelimitedFiles]] -deps = ["Mmap"] -uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" - -[[Dierckx]] -deps = ["BinaryProvider", "Libdl", "Random", "Test"] -git-tree-sha1 = "27a74763c20938a814da26f31a9e8408d16fec44" -uuid = "39dd38d3-220a-591b-8e3c-4c3a8c710a94" -version = "0.4.1" - -[[DiffBase]] -deps = ["StaticArrays"] -git-tree-sha1 = "38522d70e417cf9ace93848f17eb9fff20d486d2" -uuid = "c5cfe0b6-c10a-51a5-87e3-fd79235949f0" -version = "0.3.2" - -[[DiffEqBase]] -deps = ["Compat", 
"IteratorInterfaceExtensions", "LinearAlgebra", "RecipesBase", "RecursiveArrayTools", "Requires", "Roots", "SparseArrays", "StaticArrays", "Statistics", "SuiteSparse", "TableTraits", "Test", "TreeViews"] -git-tree-sha1 = "378e03b3a646a26ee08fcafca299b760cef09a11" -uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" -version = "5.4.1" - -[[DiffEqCallbacks]] -deps = ["DataStructures", "DiffEqBase", "LinearAlgebra", "OrdinaryDiffEq", "RecipesBase", "RecursiveArrayTools", "StaticArrays", "Test"] -git-tree-sha1 = "027a13f010f2a93b2df725b7f6202590ce6f559d" -uuid = "459566f4-90b8-5000-8ac3-15dfb0a30def" -version = "2.5.2" - -[[DiffEqDevTools]] -deps = ["BenchmarkTools", "DiffEqBase", "DiffEqMonteCarlo", "DiffEqNoiseProcess", "DiffEqPDEBase", "Distributed", "LinearAlgebra", "NLsolve", "Random", "RecipesBase", "RecursiveArrayTools", "Statistics", "Test"] -git-tree-sha1 = "6f8255811062976d0450049c190c133b89d32b65" -uuid = "f3b72e0c-5b89-59e1-b016-84e28bfd966d" -version = "2.7.1" - -[[DiffEqDiffTools]] -deps = ["LinearAlgebra", "Test"] -git-tree-sha1 = "4b21dd83c341412a0607334ac64bb5593a4bd583" -uuid = "01453d9d-ee7c-5054-8395-0335cb756afa" -version = "0.8.0" - -[[DiffEqFinancial]] -deps = ["DiffEqBase", "DiffEqNoiseProcess", "LinearAlgebra", "Markdown", "RandomNumbers", "Test"] -git-tree-sha1 = "f250512b982b771f6bdb3df05b89df314f2c2580" -uuid = "5a0ffddc-d203-54b0-88ba-2c03c0fc2e67" -version = "2.1.0" - -[[DiffEqJump]] -deps = ["Compat", "DataStructures", "DiffEqBase", "FunctionWrappers", "LinearAlgebra", "Parameters", "PoissonRandom", "Random", "RandomNumbers", "RecursiveArrayTools", "Statistics", "Test", "TreeViews"] -git-tree-sha1 = "b2fe02943312479ecaee1df7e6fef55dea481f1d" -uuid = "c894b116-72e5-5b58-be3c-e6d8d4ac2b12" -version = "6.1.0" - -[[DiffEqMonteCarlo]] -deps = ["DiffEqBase", "Distributed", "Random", "RecursiveArrayTools", "StaticArrays", "Statistics", "Test"] -git-tree-sha1 = "abaf6fa95b05c5c49fd1fb57a901caed10aacd0e" -uuid = "78ddff82-25fc-5f2b-89aa-309469cbf16f" 
-version = "0.14.0" - -[[DiffEqNoiseProcess]] -deps = ["DataStructures", "DiffEqBase", "LinearAlgebra", "Random", "RandomNumbers", "RecipesBase", "RecursiveArrayTools", "ResettableStacks", "StaticArrays", "Statistics", "Test"] -git-tree-sha1 = "328e3c1629059a7ad150801310cba831557c7e2e" -uuid = "77a26b50-5914-5dd7-bc55-306e6241c503" -version = "3.1.0" - -[[DiffEqOperators]] -deps = ["DiffEqBase", "LinearAlgebra", "Random", "SparseArrays", "StaticArrays", "Test"] -git-tree-sha1 = "332eea616ae687e7e4581602947ad5f053c7c650" -uuid = "9fdde737-9c7f-55bf-ade8-46b3f136cc48" -version = "3.4.0" - -[[DiffEqPDEBase]] -deps = ["ChunkedArrays", "Compat", "DiffEqBase", "RecipesBase", "VectorizedRoutines"] -git-tree-sha1 = "2a997aba53ecff1b19e6c78f1181352787bd9c54" -uuid = "34035eb4-37db-58ae-b003-a3202c898701" -version = "0.4.0" - -[[DiffEqParamEstim]] -deps = ["Calculus", "Dierckx", "DiffEqBase", "DiffEqSensitivity", "Distributions", "ForwardDiff", "LinearAlgebra", "LsqFit", "PenaltyFunctions", "Random", "RecursiveArrayTools", "Test"] -git-tree-sha1 = "f9ad80afe44f92f3bce788b9e4a92d4c4863b18e" -uuid = "1130ab10-4a5a-5621-a13d-e4788d82bd4c" -version = "1.6.0" - -[[DiffEqPhysics]] -deps = ["Dates", "DiffEqBase", "DiffEqCallbacks", "ForwardDiff", "LinearAlgebra", "Printf", "Random", "RecipesBase", "RecursiveArrayTools", "Reexport", "StaticArrays", "Test"] -git-tree-sha1 = "d3dbc53318a6477f496ae2347db98c3ded36c486" -uuid = "055956cb-9e8b-5191-98cc-73ae4a59e68a" -version = "3.1.0" - -[[DiffEqSensitivity]] -deps = ["Compat", "DiffEqBase", "DiffEqCallbacks", "DiffEqDiffTools", "Flux", "ForwardDiff", "LinearAlgebra", "QuadGK", "RecursiveArrayTools", "Statistics", "Test"] -git-tree-sha1 = "6a8eb6df0349e8c9822b2b33e55d8c74d1c337ae" -uuid = "41bf760c-e81c-5289-8e54-58b1f1f8abe2" -version = "3.0.2" - -[[DiffResults]] -deps = ["Compat", "StaticArrays"] -git-tree-sha1 = "34a4a1e8be7bc99bc9c611b895b5baf37a80584c" -uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" -version = "0.0.4" - 
-[[DiffRules]] -deps = ["Random", "Test"] -git-tree-sha1 = "dc0869fb2f5b23466b32ea799bd82c76480167f7" -uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "0.0.10" - -[[DifferentialEquations]] -deps = ["BoundaryValueDiffEq", "DelayDiffEq", "DiffEqBase", "DiffEqCallbacks", "DiffEqFinancial", "DiffEqJump", "DiffEqMonteCarlo", "DiffEqNoiseProcess", "DiffEqPhysics", "DimensionalPlotRecipes", "LinearAlgebra", "MultiScaleArrays", "OrdinaryDiffEq", "Random", "RecursiveArrayTools", "Reexport", "SteadyStateDiffEq", "StochasticDiffEq", "Sundials", "Test"] -git-tree-sha1 = "cabb3b0cd80afcb703e61fcf28f54505d9ae10a1" -uuid = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -version = "6.3.0" - -[[DimensionalPlotRecipes]] -deps = ["LinearAlgebra", "RecipesBase", "Test"] -git-tree-sha1 = "d348688f9a3d02c24455327231c450c272b7401c" -uuid = "c619ae07-58cd-5f6d-b883-8f17bd6a98f9" -version = "0.2.0" - -[[Distances]] -deps = ["LinearAlgebra", "Printf", "Random", "Statistics", "Test"] -git-tree-sha1 = "a135c7c062023051953141da8437ed74f89d767a" -uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" -version = "0.8.0" - -[[Distributed]] -deps = ["Random", "Serialization", "Sockets"] -uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" - -[[Distributions]] -deps = ["Distributed", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns", "Test"] -git-tree-sha1 = "c24e9b6500c037673f0241a2783472b8c3d080c7" -uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" -version = "0.16.4" - -[[DocStringExtensions]] -deps = ["LibGit2", "Markdown", "Pkg", "Test"] -git-tree-sha1 = "1df01539a1c952cef21f2d2d1c092c2bcf0177d7" -uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.6.0" - -[[DoubleFloats]] -deps = ["GenericLinearAlgebra", "LinearAlgebra", "Polynomials", "Random", "Test"] -git-tree-sha1 = "33569ca05b4b11a5b6de26631d9fc03a6734ea76" -uuid = "497a8b3b-efae-58df-a0af-a86822472b78" -version = "0.7.9" - -[[EllipsisNotation]] -git-tree-sha1 = 
"c09a512ff36dd5785ddc04fc102f2ff3b7fe05ae" -uuid = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" -version = "0.3.0" - -[[ExponentialUtilities]] -deps = ["LinearAlgebra", "Printf", "Random", "SparseArrays", "Test"] -git-tree-sha1 = "6fad21cd7637d0ad6a7661f8abea1149922d6c9c" -uuid = "d4d017d3-3776-5f7e-afef-a10c40355c18" -version = "1.4.0" - -[[FFTW]] -deps = ["AbstractFFTs", "BinaryProvider", "Compat", "Conda", "Libdl", "LinearAlgebra", "Reexport", "Test"] -git-tree-sha1 = "29cda58afbf62f35b1a094882ad6c745a47b2eaa" -uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" -version = "0.2.4" - -[[FileWatching]] -uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" - -[[FillArrays]] -deps = ["LinearAlgebra", "Random", "SparseArrays", "Test"] -git-tree-sha1 = "471b7e33dc9c9c5b9170045dd57c8ba0927b2918" -uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "0.4.0" - -[[FixedPointNumbers]] -deps = ["Test"] -git-tree-sha1 = "b8045033701c3b10bf2324d7203404be7aef88ba" -uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" -version = "0.5.3" - -[[Flux]] -deps = ["AbstractTrees", "Adapt", "CodecZlib", "Colors", "DiffRules", "ForwardDiff", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NaNMath", "Pkg", "Printf", "Random", "Reexport", "Requires", "SHA", "SpecialFunctions", "Statistics", "StatsBase", "Test", "ZipFile"] -git-tree-sha1 = "28e6dbf663fed71ea607414bc5f2f099d2831c0c" -uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c" -version = "0.7.3" - -[[ForwardDiff]] -deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "InteractiveUtils", "LinearAlgebra", "NaNMath", "Random", "SparseArrays", "SpecialFunctions", "StaticArrays", "Test"] -git-tree-sha1 = "4c4d727f1b7e0092134fabfab6396b8945c1ea5b" -uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.3" - -[[FunctionWrappers]] -deps = ["Compat"] -git-tree-sha1 = "49bf793ebd37db5adaa7ac1eae96c2c97ec86db5" -uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e" -version = "1.0.0" - -[[GPUArrays]] -deps = ["Adapt", "FFTW", "FillArrays", "LinearAlgebra", 
"Printf", "Random", "Serialization", "StaticArrays", "Test"] -git-tree-sha1 = "2b96d7f25fbea82c08a736d78cbf14df8d2100a5" -uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" -version = "0.6.1" - -[[GR]] -deps = ["Base64", "DelimitedFiles", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test"] -git-tree-sha1 = "41bd911efffb56957b45366770eaaa443de3f782" -uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" -version = "0.38.1" - -[[GenericLinearAlgebra]] -deps = ["LinearAlgebra", "Printf", "Random", "Test"] -git-tree-sha1 = "ca235f9c4652b31525232a36d7832f5ee681d76a" -uuid = "14197337-ba66-59df-a3e3-ca00e7dcff7a" -version = "0.1.0" - -[[GenericSVD]] -deps = ["LinearAlgebra", "Random", "Test"] -git-tree-sha1 = "8aa93c3f3d81562a8962047eafcc5712af0a0f59" -uuid = "01680d73-4ee2-5a08-a1aa-533608c188bb" -version = "0.2.1" - -[[Highlights]] -deps = ["DocStringExtensions", "InteractiveUtils", "REPL", "Test"] -git-tree-sha1 = "286ff83d696dd92748e603a3219618d9e407e872" -uuid = "eafb193a-b7ab-5a9e-9068-77385905fa72" -version = "0.3.1" - -[[IJulia]] -deps = ["Base64", "Conda", "Dates", "InteractiveUtils", "JSON", "Markdown", "MbedTLS", "Pkg", "Printf", "REPL", "Random", "SoftGlobalScope", "Test", "UUIDs", "ZMQ"] -git-tree-sha1 = "84b875a0af6cef89fc812b15a728a583ed34b768" -uuid = "7073ff75-c697-5162-941a-fcdaad2a7d2a" -version = "1.17.0" - -[[InteractiveUtils]] -deps = ["Markdown"] -uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" - -[[IteratorInterfaceExtensions]] -deps = ["Test"] -git-tree-sha1 = "5484e5ede2a4137b9643f4d646e8e7b87b794415" -uuid = "82899510-4779-5014-852e-03e436cf321d" -version = "0.1.1" - -[[JSON]] -deps = ["Dates", "Distributed", "Mmap", "Sockets", "Test", "Unicode"] -git-tree-sha1 = "1f7a25b53ec67f5e9422f1f551ee216503f4a0fa" -uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = "0.20.0" - -[[Juno]] -deps = ["Base64", "Logging", "Media", "Profile", "Test"] -git-tree-sha1 = "ce6246e19061e36cbdce954caaae717498daeed8" -uuid = 
"e5e0dc1b-0480-54bc-9374-aad01c23163d" -version = "0.5.4" - -[[LLVM]] -deps = ["InteractiveUtils", "Libdl", "Printf", "Test", "Unicode"] -git-tree-sha1 = "d98bd8e6e56591caceb7db300a6877fb6daca6ba" -uuid = "929cbde3-209d-540e-8aea-75f648917ca0" -version = "1.0.0" - -[[LazyArrays]] -deps = ["FillArrays", "LinearAlgebra", "StaticArrays", "Test"] -git-tree-sha1 = "e2761d301816f261b5c6ce7ae0c4718ac1669c25" -uuid = "5078a376-72f3-5289-bfd5-ec5146d43c02" -version = "0.6.0" - -[[LearnBase]] -deps = ["LinearAlgebra", "SparseArrays", "StatsBase", "Test"] -git-tree-sha1 = "c4b5da6d68517f46f70ed5157b28336b56cd2ff3" -uuid = "7f8f8fb0-2700-5f03-b4bd-41f8cfc144b6" -version = "0.2.2" - -[[LibGit2]] -uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" - -[[Libdl]] -uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" - -[[LineSearches]] -deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf", "Test"] -git-tree-sha1 = "54eb90e8dbe745d617c78dee1d6ae95c7f6f5779" -uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" -version = "7.0.1" - -[[LinearAlgebra]] -deps = ["Libdl"] -uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" - -[[Logging]] -uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" - -[[LsqFit]] -deps = ["Distributions", "LinearAlgebra", "NLSolversBase", "OptimBase", "Random", "StatsBase", "Test"] -git-tree-sha1 = "831607572cefd71ee5ca4a49aefc29ce3c20502b" -uuid = "2fda8390-95c7-5789-9bda-21331edee243" -version = "0.7.3" - -[[MacroTools]] -deps = ["Compat"] -git-tree-sha1 = "3fd1a3022952128935b449c33552eb65895380c1" -uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.4.5" - -[[Markdown]] -deps = ["Base64"] -uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" - -[[MbedTLS]] -deps = ["BinaryProvider", "Dates", "Distributed", "Libdl", "Random", "Sockets", "Test"] -git-tree-sha1 = "2d94286a9c2f52c63a16146bb86fd6cdfbf677c6" -uuid = "739be429-bea8-5141-9913-cc70e7f3736d" -version = "0.6.8" - -[[Measurements]] -deps = ["Calculus", "LinearAlgebra", "QuadGK", "RecipesBase", "Requires", 
"SpecialFunctions", "Statistics", "Test"] -git-tree-sha1 = "acfcb9bb16faa1e7f720ba064ac6a55e3e573344" -uuid = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" -version = "2.0.0" - -[[Measures]] -deps = ["Test"] -git-tree-sha1 = "ddfd6d13e330beacdde2c80de27c1c671945e7d9" -uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" -version = "0.3.0" - -[[Media]] -deps = ["MacroTools", "Test"] -git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58" -uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27" -version = "0.5.0" - -[[Missings]] -deps = ["Dates", "InteractiveUtils", "SparseArrays", "Test"] -git-tree-sha1 = "d1d2585677f2bd93a97cfeb8faa7a0de0f982042" -uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" -version = "0.4.0" - -[[Mmap]] -uuid = "a63ad114-7e13-5084-954f-fe012c677804" - -[[MuladdMacro]] -deps = ["MacroTools", "Test"] -git-tree-sha1 = "41e6e7c4b448afeaddaac7f496b414854f83b848" -uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221" -version = "0.2.1" - -[[MultiScaleArrays]] -deps = ["DiffEqBase", "LinearAlgebra", "RecursiveArrayTools", "Statistics", "StochasticDiffEq", "Test", "TreeViews"] -git-tree-sha1 = "4220ceea71186db2bb45cb817984c99e563f3662" -uuid = "f9640e96-87f6-5992-9c3b-0743c6a49ffa" -version = "1.4.0" - -[[Mustache]] -deps = ["Printf", "Tables", "Test"] -git-tree-sha1 = "3cc9a0b673519c5c39186e636d747facb12bf075" -uuid = "ffc61752-8dc7-55ee-8c37-f3e9cdd09e70" -version = "0.5.11" - -[[NLSolversBase]] -deps = ["Calculus", "DiffEqDiffTools", "DiffResults", "Distributed", "ForwardDiff", "LinearAlgebra", "Random", "SparseArrays", "Test"] -git-tree-sha1 = "0c6f0e7f2178f78239cfb75310359eed10f2cacb" -uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" -version = "7.3.1" - -[[NLsolve]] -deps = ["DiffBase", "DiffEqDiffTools", "Distances", "ForwardDiff", "LineSearches", "LinearAlgebra", "NLSolversBase", "Printf", "Reexport", "SparseArrays", "Test"] -git-tree-sha1 = "0e046f4f72801c9782d64db972ce66a85d3473f1" -uuid = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" -version = "3.0.1" - -[[NNlib]] -deps = 
["Libdl", "LinearAlgebra", "MacroTools", "Requires", "Test"] -git-tree-sha1 = "51330bb45927379007e089997bf548fbe232589d" -uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -version = "0.4.3" - -[[NaNMath]] -deps = ["Compat"] -git-tree-sha1 = "ce3b85e484a5d4c71dd5316215069311135fa9f2" -uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" -version = "0.3.2" - -[[Optim]] -deps = ["Calculus", "DiffEqDiffTools", "ForwardDiff", "LineSearches", "LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "PositiveFactorizations", "Printf", "Random", "SparseArrays", "StatsBase", "Test"] -git-tree-sha1 = "0f2a6c6ff9db396cc7af15bb1cf057a26662ff17" -uuid = "429524aa-4258-5aef-a3af-852621145aeb" -version = "0.17.2" - -[[OptimBase]] -deps = ["Compat", "NLSolversBase", "Printf", "Reexport", "Test"] -git-tree-sha1 = "92667ab46a66ad502ec3044f65c41ea68b2e0e9c" -uuid = "87e2bd06-a317-5318-96d9-3ecbac512eee" -version = "2.0.0" - -[[OrderedCollections]] -deps = ["Random", "Serialization", "Test"] -git-tree-sha1 = "85619a3f3e17bb4761fe1b1fd47f0e979f964d5b" -uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" -version = "1.0.2" - -[[OrdinaryDiffEq]] -deps = ["DataStructures", "DiffEqBase", "DiffEqDiffTools", "DiffEqOperators", "ExponentialUtilities", "ForwardDiff", "GenericSVD", "InteractiveUtils", "LinearAlgebra", "Logging", "MuladdMacro", "NLsolve", "Parameters", "Printf", "Random", "RecursiveArrayTools", "Reexport", "SparseArrays", "StaticArrays", "Statistics", "Test"] -git-tree-sha1 = "bed4febf428a364adffc2e2c6001aaecf7bf1f7d" -uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -version = "5.3.0" - -[[PDMats]] -deps = ["Arpack", "LinearAlgebra", "SparseArrays", "SuiteSparse", "Test"] -git-tree-sha1 = "b6c91fc0ab970c0563cbbe69af18d741a49ce551" -uuid = "90014a1f-27ba-587c-ab20-58faa44d9150" -version = "0.9.6" - -[[ParameterizedFunctions]] -deps = ["DataStructures", "DiffEqBase", "InteractiveUtils", "LinearAlgebra", "SimpleTraits", "SymEngine", "Test"] -git-tree-sha1 = 
"18171d999723bf119f82c0a5171a056e5261252b" -uuid = "65888b18-ceab-5e60-b2b9-181511a3b968" -version = "4.1.1" - -[[Parameters]] -deps = ["Markdown", "OrderedCollections", "REPL", "Test"] -git-tree-sha1 = "70bdbfb2bceabb15345c0b54be4544813b3444e4" -uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" -version = "0.10.3" - -[[PenaltyFunctions]] -deps = ["InteractiveUtils", "LearnBase", "LinearAlgebra", "RecipesBase", "Reexport", "Test"] -git-tree-sha1 = "b0baaa5218ca0ffd6a8ae37ef0b58e0df688ac8b" -uuid = "06bb1623-fdd5-5ca2-a01c-88eae3ea319e" -version = "0.1.2" - -[[Pkg]] -deps = ["Dates", "LibGit2", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"] -uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" - -[[PlotThemes]] -deps = ["PlotUtils", "Requires", "Test"] -git-tree-sha1 = "f3afd2d58e1f6ac9be2cea46e4a9083ccc1d990b" -uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" -version = "0.3.0" - -[[PlotUtils]] -deps = ["Colors", "Dates", "Printf", "Random", "Reexport", "Test"] -git-tree-sha1 = "fd28f30a294a38ec847de95d8ac7ac916ccd7c06" -uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" -version = "0.5.5" - -[[Plots]] -deps = ["Base64", "Contour", "Dates", "FixedPointNumbers", "GR", "JSON", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "Reexport", "Requires", "Showoff", "SparseArrays", "StaticArrays", "Statistics", "StatsBase", "Test", "UUIDs"] -git-tree-sha1 = "c68a9ec8a13a5bdcb85c311378a86b7d7b9b0792" -uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "0.23.1" - -[[PoissonRandom]] -deps = ["Random", "Statistics", "Test"] -git-tree-sha1 = "44d018211a56626288b5d3f8c6497d28c26dc850" -uuid = "e409e4f3-bfea-5376-8464-e040bb5c01ab" -version = "0.4.0" - -[[Polynomials]] -deps = ["LinearAlgebra", "SparseArrays", "Test"] -git-tree-sha1 = "62142bd65d3f8aeb2226ec64dd8493349147df94" -uuid = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" -version = "0.5.2" - -[[PositiveFactorizations]] -deps = ["LinearAlgebra", "Test"] 
-git-tree-sha1 = "86ae7329c4b5c266acf5c7c524a972300d991e1c" -uuid = "85a6dd25-e78a-55b7-8502-1745935b8125" -version = "0.2.1" - -[[Printf]] -deps = ["Unicode"] -uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" - -[[Profile]] -deps = ["Printf"] -uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79" - -[[QuadGK]] -deps = ["DataStructures", "LinearAlgebra", "Test"] -git-tree-sha1 = "3ce467a8e76c6030d4c3786e7d3a73442017cdc0" -uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -version = "2.0.3" - -[[REPL]] -deps = ["InteractiveUtils", "Markdown", "Sockets"] -uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" - -[[Random]] -deps = ["Serialization"] -uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" - -[[RandomNumbers]] -deps = ["Pkg", "Printf", "Random", "Requires", "Test"] -git-tree-sha1 = "2b965faf50a587c8b419b883e8f19c3bfedde76d" -uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143" -version = "1.2.0" - -[[RecipesBase]] -deps = ["Random", "Test"] -git-tree-sha1 = "0b3cb370ee4dc00f47f1193101600949f3dcf884" -uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -version = "0.6.0" - -[[RecursiveArrayTools]] -deps = ["ArrayInterface", "RecipesBase", "Requires", "StaticArrays", "Statistics", "Test"] -git-tree-sha1 = "187ea7dd541955102c7035a6668613bdf52022ca" -uuid = "731186ca-8d62-57ce-b412-fbd966d074cd" -version = "0.20.0" - -[[Reexport]] -deps = ["Pkg"] -git-tree-sha1 = "7b1d07f411bc8ddb7977ec7f377b97b158514fe0" -uuid = "189a3867-3050-52da-a836-e630ba90ab69" -version = "0.2.0" - -[[Requires]] -deps = ["Test"] -git-tree-sha1 = "f6fbf4ba64d295e146e49e021207993b6b48c7d1" -uuid = "ae029012-a4dd-5104-9daa-d747884805df" -version = "0.5.2" - -[[ResettableStacks]] -deps = ["Random", "StaticArrays", "Test"] -git-tree-sha1 = "8b4f6cf3c97530e1ba7177ad3bc2b134373da851" -uuid = "ae5879a3-cd67-5da8-be7f-38c6eb64a37b" -version = "0.6.0" - -[[Rmath]] -deps = ["BinaryProvider", "Libdl", "Random", "Statistics", "Test"] -git-tree-sha1 = "9a6c758cdf73036c3239b0afbea790def1dabff9" -uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa" 
-version = "0.5.0" - -[[Roots]] -deps = ["Compat", "Printf"] -git-tree-sha1 = "2e7171b6f3b58b81201ba37d9e1220aff6d904a1" -uuid = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" -version = "0.7.4" - -[[SHA]] -uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" - -[[Serialization]] -uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" - -[[SharedArrays]] -deps = ["Distributed", "Mmap", "Random", "Serialization"] -uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" - -[[Showoff]] -deps = ["Compat"] -git-tree-sha1 = "276b24f3ace98bec911be7ff2928d497dc759085" -uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" -version = "0.2.1" - -[[SimpleTraits]] -deps = ["InteractiveUtils", "MacroTools", "Test"] -git-tree-sha1 = "c0a542b8d5e369b179ccd296b2ca987f6da5da0a" -uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" -version = "0.8.0" - -[[Sockets]] -uuid = "6462fe0b-24de-5631-8697-dd941f90decc" - -[[SoftGlobalScope]] -deps = ["Test"] -git-tree-sha1 = "99e323ab05adfc057b0e8058c6bd90d19052c0a6" -uuid = "b85f4697-e234-5449-a836-ec8e2f98b302" -version = "1.0.9" - -[[SortingAlgorithms]] -deps = ["DataStructures", "Random", "Test"] -git-tree-sha1 = "03f5898c9959f8115e30bc7226ada7d0df554ddd" -uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" -version = "0.3.1" - -[[SparseArrays]] -deps = ["LinearAlgebra", "Random"] -uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" - -[[SpecialFunctions]] -deps = ["BinDeps", "BinaryProvider", "Libdl", "Test"] -git-tree-sha1 = "0b45dc2e45ed77f445617b99ff2adf0f5b0f23ea" -uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "0.7.2" - -[[StaticArrays]] -deps = ["InteractiveUtils", "LinearAlgebra", "Random", "Statistics", "Test"] -git-tree-sha1 = "3841b39ed5f047db1162627bf5f80a9cd3e39ae2" -uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "0.10.3" - -[[Statistics]] -deps = ["LinearAlgebra", "SparseArrays"] -uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" - -[[StatsBase]] -deps = ["DataStructures", "DelimitedFiles", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", 
"SparseArrays", "Statistics", "Test"] -git-tree-sha1 = "435707791dc85a67d98d671c1c3fcf1b20b00f94" -uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -version = "0.29.0" - -[[StatsFuns]] -deps = ["Rmath", "SpecialFunctions", "Test"] -git-tree-sha1 = "b3a4e86aa13c732b8a8c0ba0c3d3264f55e6bb3e" -uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" -version = "0.8.0" - -[[SteadyStateDiffEq]] -deps = ["Compat", "DiffEqBase", "DiffEqCallbacks", "LinearAlgebra", "NLsolve", "Reexport", "Test"] -git-tree-sha1 = "fe9852d18c3e30f384003da50d6049e5fbc97071" -uuid = "9672c7b4-1e72-59bd-8a11-6ac3964bc41f" -version = "1.4.0" - -[[StochasticDiffEq]] -deps = ["DataStructures", "DiffEqBase", "DiffEqDiffTools", "DiffEqNoiseProcess", "DiffEqOperators", "Distributed", "ForwardDiff", "LinearAlgebra", "Logging", "MuladdMacro", "NLsolve", "Parameters", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "SparseArrays", "StaticArrays", "Test"] -git-tree-sha1 = "af003918bf0ccc7a712b1c8b4d40df59bc76ca8d" -uuid = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" -version = "6.1.1" - -[[SuiteSparse]] -deps = ["Libdl", "LinearAlgebra", "SparseArrays"] -uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" - -[[Sundials]] -deps = ["BinaryProvider", "DataStructures", "DiffEqBase", "Libdl", "LinearAlgebra", "Reexport", "SparseArrays", "Test"] -git-tree-sha1 = "2ff02f2cd3b4429d05a7b1f36c5caf671e27ee8c" -uuid = "c3572dad-4567-51f8-b174-8c6c989267f4" -version = "3.1.0" - -[[SymEngine]] -deps = ["BinaryProvider", "Compat", "Libdl", "RecipesBase", "SpecialFunctions"] -git-tree-sha1 = "4e8567719e3cb4868febe6480f66ba046ae1d2cb" -uuid = "123dc426-2d89-5057-bbad-38513e3affd8" -version = "0.5.0" - -[[TableTraits]] -deps = ["IteratorInterfaceExtensions", "Test"] -git-tree-sha1 = "eba4b1d0a82bdd773307d652c6e5f8c82104c676" -uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" -version = "0.4.1" - -[[Tables]] -deps = ["IteratorInterfaceExtensions", "LinearAlgebra", "Requires", "TableTraits", "Test"] -git-tree-sha1 = 
"5aa45584645393c1717e0cc1f0362c2ea81470a9" -uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" -version = "0.1.17" - -[[Test]] -deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] -uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[[TranscodingStreams]] -deps = ["Pkg", "Random", "Test"] -git-tree-sha1 = "8a032ceb5cf7a28bf1bdb77746b250b9e9fda565" -uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.0" - -[[TreeViews]] -deps = ["Test"] -git-tree-sha1 = "8d0d7a3fe2f30d6a7f833a5f19f7c7a5b396eae6" -uuid = "a2a6695c-b41b-5b7d-aed9-dbfdeacea5d7" -version = "0.3.0" - -[[URIParser]] -deps = ["Test", "Unicode"] -git-tree-sha1 = "6ddf8244220dfda2f17539fa8c9de20d6c575b69" -uuid = "30578b45-9adc-5946-b283-645ec420af67" -version = "0.4.0" - -[[UUIDs]] -deps = ["Random", "SHA"] -uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" - -[[Unicode]] -uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" - -[[Unitful]] -deps = ["LinearAlgebra", "Random", "Test"] -git-tree-sha1 = "94736a38908c6b43d22e1abffc4f72ab19dabbc1" -uuid = "1986cc42-f94f-5a68-af5c-568840ba703d" -version = "0.15.0" - -[[VectorizedRoutines]] -deps = ["Distributions", "LinearAlgebra", "Statistics", "StatsFuns", "Test"] -git-tree-sha1 = "39e3690167cd006ad45f6f2ef68f2b83a5130b46" -uuid = "0e69188a-a5d4-5622-b4e4-a72373136fc5" -version = "0.1.0" - -[[VersionParsing]] -deps = ["Compat"] -git-tree-sha1 = "c9d5aa108588b978bd859554660c8a5c4f2f7669" -uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" -version = "1.1.3" - -[[Weave]] -deps = ["Base64", "Compat", "Dates", "Highlights", "JSON", "Markdown", "Mustache", "Pkg", "Printf", "REPL", "Requires", "Serialization", "Test", "YAML"] -git-tree-sha1 = "48453e8d50d7dbfecdb99cff0faad9887f3a2f37" -uuid = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" -version = "0.8.1" - -[[YAML]] -deps = ["Codecs", "Compat"] -git-tree-sha1 = "3bde77cee95cce0c0b9b18813d85e18e8ed4f415" -uuid = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" -version = "0.3.2" - -[[ZMQ]] -deps = ["BinaryProvider", "FileWatching", 
"Libdl", "Sockets", "Test"] -git-tree-sha1 = "34e7ac2d1d59d19d0e86bde99f1f02262bfa1613" -uuid = "c2297ded-f4af-51ae-bb23-16f91089e4e1" -version = "1.0.0" - -[[ZipFile]] -deps = ["BinaryProvider", "Libdl", "Printf", "Test"] -git-tree-sha1 = "4000c633efe994b2e10b31b6d91382c4b7412dac" -uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea" -version = "0.8.0" diff --git a/Project.toml b/Project.toml index 11598d78..d8316660 100644 --- a/Project.toml +++ b/Project.toml @@ -1,33 +1,18 @@ -name = "DiffEqTutorials" -uuid = "6d1b261a-3be8-11e9-3f2f-0b112a9a8436" +name = "SciMLTutorials" +uuid = "30cb0354-2223-46a9-baa0-41bdcfbe0178" authors = ["Chris Rackauckas "] -version = "0.1.0" +version = "1.0.0" [deps] -ArbNumerics = "7e558dbc-694d-5a72-987c-6f4ebed21442" -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -CUDAnative = "be33ccc6-a3ff-5ff2-a52e-74243cff1e17" -CuArrays = "3a865a2d-5b23-5a0f-bc46-62713ec82fae" -DecFP = "55939f99-70c6-5e9b-8bb0-5071ed7d61fd" -Decimals = "abce61dc-4473-55a0-ba07-351d65e31d42" -DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" -DiffEqDevTools = "f3b72e0c-5b89-59e1-b016-84e28bfd966d" -DiffEqParamEstim = "1130ab10-4a5a-5621-a13d-e4788d82bd4c" -DiffEqPhysics = "055956cb-9e8b-5191-98cc-73ae4a59e68a" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -DoubleFloats = "497a8b3b-efae-58df-a0af-a86822472b78" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" -Optim = "429524aa-4258-5aef-a3af-852621145aeb" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -ParameterizedFunctions = "65888b18-ceab-5e60-b2b9-181511a3b968" +Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -RecursiveArrayTools = 
"731186ca-8d62-57ce-b412-fbd966d074cd" -StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" -Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" Weave = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" + +[compat] +IJulia = "1.20" +Plots = "1.6" +Weave = "0.10" +julia = "1.6" diff --git a/README.md b/README.md index b1c9faf6..10971dbd 100644 --- a/README.md +++ b/README.md @@ -1,76 +1,103 @@ -# DiffEqTutorials.jl +# SciMLTutorials.jl: Tutorials for Scientific Machine Learning and Differential Equations -[![Join the chat at https://gitter.im/JuliaDiffEq/Lobby](https://badges.gitter.im/JuliaDiffEq/Lobby.svg)](https://gitter.im/JuliaDiffEq/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Join the chat at https://julialang.zulipchat.com #sciml-bridged](https://img.shields.io/static/v1?label=Zulip&message=chat&color=9558b2&labelColor=389826)](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged) +[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](http://tutorials.sciml.ai/stable/) +[![Global Docs](https://img.shields.io/badge/docs-SciML-blue.svg)](https://docs.sciml.ai/dev/highlevels/learning_resources/#SciMLTutorials) -DiffEqTutorials.jl holds PDFs, webpages, and interactive Jupyter notebooks -showing how to utilize the software in the JuliaDiffEq ecosystem. 
This set of -tutorials was made to complement the -[documentation](http://docs.juliadiffeq.org/latest/) and the -[devdocs](http://devdocs.juliadiffeq.org/latest/) +[![Build status](https://badge.buildkite.com/8a39c2e1b44511eb84bdcd9019663cad757ae2479abd340508.svg)](https://buildkite.com/julialang/scimltutorials-dot-jl) + +[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac) +[![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) + +SciMLTutorials.jl holds PDFs, webpages, and interactive Jupyter notebooks +showing how to utilize the software in the [SciML Scientific Machine Learning ecosystem](https://sciml.ai/). +This set of tutorials was made to complement the [documentation](https://sciml.ai/documentation/) +and the [devdocs](http://devdocs.sciml.ai/latest/) by providing practical examples of the concepts. For more details, please consult the docs. +#### Note: this library has been deprecated and its tutorials have been moved to the repos of the respective packages. It may be revived in the future if there is a need for longer-form tutorials! + +## Results + +To view the SciML Tutorials, go to [tutorials.sciml.ai](https://tutorials.sciml.ai/stable/). By default, this +will lead to the latest tagged version of the tutorials. To see the in-development version of the tutorials, go to +[https://tutorials.sciml.ai/dev/](https://tutorials.sciml.ai/dev/). + +Static outputs in pdf, markdown, and html reside in [SciMLTutorialsOutput](https://github.com/SciML/SciMLTutorialsOutput). 
+ +## Video Tutorial + +[![Video Tutorial](https://user-images.githubusercontent.com/1814174/36342812-bdfd0606-13b8-11e8-9eff-ff219de909e5.PNG)](https://youtu.be/KPEqYtEd-zY) + ## Interactive Notebooks -To run the tutorials interactively via Jupyter notebooks, install the package -and open the tutorials like: +To generate the interactive notebooks, first install the SciMLTutorials, instantiate the +environment, and then run `SciMLTutorials.open_notebooks()`. This looks as follows: ```julia -using Pkg -pkg"add https://github.com/JuliaDiffEq/DiffEqTutorials.jl" -using DiffEqTutorials -DiffEqTutorials.open_notebooks() +]add SciMLTutorials#master +]activate SciMLTutorials +]instantiate +using SciMLTutorials +SciMLTutorials.open_notebooks() ``` -## Video Tutorial +The tutorials will be generated at your `pwd()` in a folder called `generated_notebooks`. -[![Video Tutorial](https://user-images.githubusercontent.com/1814174/36342812-bdfd0606-13b8-11e8-9eff-ff219de909e5.PNG)](https://youtu.be/KPEqYtEd-zY) +Note that when running the tutorials, the packages are not automatically added. Thus you +will need to add the packages manually or use the internal Project/Manifest tomls to +instantiate the correct packages. This can be done by activating the folder of the tutorials. 
+For example, + +```julia +using Pkg +Pkg.activate(joinpath(pkgdir(SciMLTutorials),"tutorials","models")) +Pkg.instantiate() +``` -## Table of Contents - -- Introduction - - [Introduction to DifferentialEquations.jl through ODEs](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/ode_introduction.html) - - [Detecting Stiffness and Choosing an ODE Algorithm](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/choosing_algs.html) - - [Optimizing your DiffEq Code](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/optimizing_diffeq_code.html) - - [Callbacks and Event Handling](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/callbacks_and_events.html) - - [Formatting Plots](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/formatting_plots.html) -- Modeling Examples - - [Classical Physics Models](http://juliadiffeq.org/DiffEqTutorials.jl/html/models/classical_physics.html) - - [Conditional Dosing Example](http://juliadiffeq.org/DiffEqTutorials.jl/html/models/conditional_dosing.html) - - [DiffEqBiological Tutorial I: Introduction](http://juliadiffeq.org/DiffEqTutorials.jl/html/models/diffeqbio_I_introduction.html) - - [DiffEqBiological Tutorial II: Network Properties API](http://juliadiffeq.org/DiffEqTutorials.jl/html/models/diffeqbio_II_networkproperties.html) - - [Kepler Problem Orbit](http://juliadiffeq.org/DiffEqTutorials.jl/html/models/kepler_problem.html) -- Advanced ODE Features - - [Feagin's Order 10, 12, and 14 Methods](http://juliadiffeq.org/DiffEqTutorials.jl/html/ode_extras/feagin.html) - - [Finding Maxima and Minima of DiffEq Solutions](http://juliadiffeq.org/DiffEqTutorials.jl/html/ode_extras/ode_minmax.html) - - [Monte Carlo Parameter Estimation from Data](http://juliadiffeq.org/DiffEqTutorials.jl/html/ode_extras/monte_carlo_parameter_estim.html) -- Type Handling - - [Solving Equations with Julia-Defined Types](http://juliadiffeq.org/DiffEqTutorials.jl/html/type_handling/number_types.html) - - [Numbers with 
Uncertainties](http://juliadiffeq.org/DiffEqTutorials.jl/html/type_handling/uncertainties.html) - - [Unit Check Arithmetic via Unitful.jl](http://juliadiffeq.org/DiffEqTutorials.jl/html/type_handling/unitful.html) -- Advanced - - [A 2D Cardiac Electrophysiology Model (CUDA-accelerated PDE solver)](http://juliadiffeq.org/DiffEqTutorials.jl/html/advanced/beeler_reuter.html) +will add all of the packages required to run any tutorial in the `models` folder. ## Contributing -First of all, make sure that your current directory is `DiffEqTutorials`. All -of the files are generated from the Weave.jl files in the `tutorials` folder. +All of the files are generated from the Weave.jl files in the `tutorials` folder. The generation process runs automatically, +and thus one does not necessarily need to test the Weave process locally. Instead, simply open a PR that adds/updates a +file in the "tutorials" folder and the PR will generate the tutorial on demand. Its artifacts can then be inspected in the +Buildkite as described below before merging. Note that it will use the Project.toml and Manifest.toml of the subfolder, so +any changes to dependencies requires that those are updated. + +### Reporting Bugs and Issues + +Report any bugs or issues at [the SciMLTutorials repository](https://github.com/SciML/SciMLTutorials.jl/issues). + +### Inspecting Tutorial Results + +To see tutorial results before merging, click into the BuildKite, click onto +Artifacts, and then investigate the trained results. + +![](https://user-images.githubusercontent.com/1814174/118359358-02ddc980-b551-11eb-8a9b-24de947cefee.PNG) + +### Manually Generating Files + To run the generation process, do for example: ```julia -using Pkg, DiffEqTutorials -cd(joinpath(dirname(pathof(DiffEqTutorials)), "..")) -Pkg.pkg"activate ." 
-Pkg.pkg"instantiate" -DiffEqTutorials.weave_file("introduction","ode_introduction.jmd") +]activate SciMLTutorials # Get all of the packages +using SciMLTutorials +SciMLTutorials.weave_file(joinpath(pkgdir(SciMLTutorials),"tutorials","models"),"01-classical_physics.jmd") +``` + +To generate all of the files in a folder, for example, run: + +```julia +SciMLTutorials.weave_folder(joinpath(pkgdir(SciMLTutorials),"tutorials","models")) ``` To generate all of the notebooks, do: ```julia -DiffEqTutorials.weave_all() +SciMLTutorials.weave_all() ``` -If you add new tutorials which require new packages, simply updating your local -environment will change the project and manifest files. When this occurs, the -updated environment files should be included in the PR. +Each of the tuturials displays the computer characteristics at the bottom of +the benchmark. diff --git a/REQUIRE b/REQUIRE deleted file mode 100644 index e9e3d679..00000000 --- a/REQUIRE +++ /dev/null @@ -1,2 +0,0 @@ -Weave -IJulia diff --git a/docs/Project.toml b/docs/Project.toml new file mode 100644 index 00000000..dfa65cd1 --- /dev/null +++ b/docs/Project.toml @@ -0,0 +1,2 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" diff --git a/docs/extrasrc/assets/favicon.ico b/docs/extrasrc/assets/favicon.ico new file mode 100644 index 00000000..3c6bd470 Binary files /dev/null and b/docs/extrasrc/assets/favicon.ico differ diff --git a/docs/extrasrc/assets/logo.png b/docs/extrasrc/assets/logo.png new file mode 100644 index 00000000..6f4c3e26 Binary files /dev/null and b/docs/extrasrc/assets/logo.png differ diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 00000000..19fec3c8 --- /dev/null +++ b/docs/make.jl @@ -0,0 +1,36 @@ +using Documenter, SciMLTutorialsOutput + +dir = @__DIR__() * "/.." 
+ +@show dir +@show readdir(dir) + +include("pages.jl") + +mathengine = MathJax3(Dict(:loader => Dict("load" => ["[tex]/require", "[tex]/mathtools"]), + :tex => Dict("inlineMath" => [["\$", "\$"], ["\\(", "\\)"]], + "packages" => [ + "base", + "ams", + "autoload", + "mathtools", + "require" + ]))) + +makedocs( + sitename = "The SciML Tutorials", + authors = "Chris Rackauckas", + modules = [SciMLTutorialsOutput], + clean = true, doctest = false, + format = Documenter.HTML(#analytics = "UA-90474609-3", + assets = ["assets/favicon.ico"], + canonical = "https://tutorials.sciml.ai/stable/", + mathengine = mathengine), + pages = pages +) + +deploydocs(; + repo = "github.com/SciML/SciMLTutorialsOutput", + devbranch = "main", + branch = "main" +) diff --git a/docs/pages.jl b/docs/pages.jl new file mode 100644 index 00000000..e8f8df4e --- /dev/null +++ b/docs/pages.jl @@ -0,0 +1,50 @@ +# This file assumes `dir` is the directory for the package! dir = @__DIR__() * "/.." + +dir = @__DIR__() * "/.." 
+ +cp(joinpath(dir, "markdown"), joinpath(dir, "docs", "src"), force = true) +cp(joinpath(dir, "docs", "extrasrc", "assets"), joinpath(dir, "docs", "src", "assets"), force = true) +cp(joinpath(dir, "README.md"), joinpath(dir, "docs", "src", "index.md"), force = true) +tutorialsdir = joinpath(dir, "docs", "src") + +pages = Any["SciMLTutorials.jl: Tutorials for Scientific Machine Learning (SciML), Equation Solvers, and AI for Science" => "index.md"] + +for folder in readdir(tutorialsdir) + newpages = Any[] + if folder[(end - 2):end] != ".md" && folder != "Testing" && folder != "figures" && + folder != "assets" + for file in filter(x -> x[(end - 2):end] == ".md", readdir( + joinpath(tutorialsdir, folder))) + try + filecontents = readlines(joinpath(tutorialsdir, folder, file)) + title = filecontents[3][9:(end - 1)] + + # Cut out the first 5 lines from the file to remove the Weave header stuff + open(joinpath(tutorialsdir, folder, file), "w") do output + println(output, "# $title") + for line in Iterators.drop(filecontents, 4) + println(output, line) + end + end + push!(newpages, title => joinpath(folder, file)) + catch e + @show folder, file, e + end + end + push!(pages, folder => newpages) + end +end + +# The result is in alphabetical order, change to the wanted order + +permute!(pages, + [1] +) + +names = [ + "SciMLTutorials.jl: Tutorials for Scientific Machine Learning (SciML) and Equation Solvers" +] + +for i in 1:length(pages) + pages[i] = names[i] => pages[i][2] +end diff --git a/docs/src/markdown/blank.jl b/docs/src/markdown/blank.jl new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/docs/src/markdown/blank.jl @@ -0,0 +1 @@ + diff --git a/html/advanced/beeler_reuter.html b/html/advanced/beeler_reuter.html deleted file mode 100644 index 32bea511..00000000 --- a/html/advanced/beeler_reuter.html +++ /dev/null @@ -1,1448 +0,0 @@ - - - - - - An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model - - - - - - - - - - - - - - - - - -
-
-
- -
-

An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model

-
Shahriar Iravanian
- -
- -

Background

-

JuliaDiffEq is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). JuliaDiffEq provides a large number of explicit and implicit solvers suited for different types of ODE problems. It is possible to reduce a system of partial differential equations into an ODE problem by employing the method of lines (MOL). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. Solving Systems of Stochastic PDEs and using GPUs in Julia is a brief introduction to MOL and using GPUs to accelerate PDE solving in JuliaDiffEq. Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use CuArray and CUDAnative libraries to run the explicit part of the model on a GPU.

-

Note that this tutorial does not use the higher order IMEX methods built into DifferentialEquations.jl but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios.

-

There are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic Hodgkin-Huxley model and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE,

-

\[ -\partial V / \partial t = \nabla (D \nabla V) - \frac {I_\text{ion}} {C_m}, -\]

-

where $V$ is the transmembrane potential, $D$ is a diffusion tensor, $I_\text{ion}$ is the sum of the transmembrane currents and is calculated from the ODEs, and $C_m$ is the membrane capacitance and is usually assumed to be constant. Here we model a uniform and isotropic medium. Therefore, the model can be simplified to,

-

\[ -\partial V / \partial t = D \Delta{V} - \frac {I_\text{ion}} {C_m}, -\]

-

where $D$ is now a scalar. By nature, these models have to deal with different time scales and are therefore classified as stiff. Commonly, they are solved using the explicit Euler method, usually with a closed form for the integration of the gating variables (the Rush-Larsen method, see below). We can also solve these problems using implicit or semi-implicit PDE solvers (e.g., the Crank-Nicholson method combined with an iterative solver). Higher order explicit methods such as Runge-Kutta and linear multi-step methods cannot overcome the stiffness and are not particularly helpful.

-

In this tutorial, we first develop a CPU-only IMEX solver and then show how to move the explicit part to a GPU.

-

The Beeler-Reuter Model

-

We have chosen the Beeler-Reuter ventricular ionic model as our example. It is a classic model first described in 1977 and is used as a base for many other ionic models. It has eight state variables, which makes it complicated enough to be interesting without obscuring the main points of the exercise. The eight state variables are: the transmembrane potential ($V$), sodium-channel activation and inactivation gates ($m$ and $h$, similar to the Hodgkin-Huxley model), with an additional slow inactivation gate ($j$), calcium-channel activation and deactivations gates ($d$ and $f$), a time-dependent inward-rectifying potassium current gate ($x_1$), and intracellular calcium concentration ($c$). There are four currents: a sodium current ($i_{Na}$), a calcium current ($i_{Ca}$), and two potassium currents, one time-dependent ($i_{x_1}$) and one background time-independent ($i_{K_1}$).

-

CPU-Only Beeler-Reuter Solver

-

Let's start by developing a CPU only IMEX solver. The main idea is to use the DifferentialEquations framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. If no analytical approximation was known for the explicit part, one could use methods from this list.

-

First, we define the model constants:

- - -
-const v0 = -84.624
-const v1 = 10.0
-const C_K1 = 1.0f0
-const C_x1 = 1.0f0
-const C_Na = 1.0f0
-const C_s = 1.0f0
-const D_Ca = 0.0f0
-const D_Na = 0.0f0
-const g_s = 0.09f0
-const g_Na = 4.0f0
-const g_NaC = 0.005f0
-const ENa = 50.0f0 + D_Na
-const γ = 0.5f0
-const C_m = 1.0f0
-
- - -
-1.0f0
-
- - -

Note that the constants are defined as Float32 and not Float64. The reason is that most GPUs have many more single precision cores than double precision ones. To ensure uniformity between CPU and GPU, we also code most states variables as Float32 except for the transmembrane potential, which is solved by an implicit solver provided by the Sundial library and needs to be Float64.

-

The State Structure

-

Next, we define a struct to contain our state. BeelerReuterCpu is a functor and we will define a deriv function as its associated function.

- - -
-mutable struct BeelerReuterCpu <: Function
-    t::Float64              # the last timestep time to calculate Δt
-    diff_coef::Float64      # the diffusion-coefficient (coupling strength)
-
-    C::Array{Float32, 2}    # intracellular calcium concentration
-    M::Array{Float32, 2}    # sodium current activation gate (m)
-    H::Array{Float32, 2}    # sodium current inactivation gate (h)
-    J::Array{Float32, 2}    # sodium current slow inactivaiton gate (j)
-    D::Array{Float32, 2}    # calcium current activaiton gate (d)
-    F::Array{Float32, 2}    # calcium current inactivation gate (f)
-    XI::Array{Float32, 2}   # inward-rectifying potassium current (iK1)
-
-    Δu::Array{Float64, 2}   # place-holder for the Laplacian
-
-    function BeelerReuterCpu(u0, diff_coef)
-        self = new()
-
-        ny, nx = size(u0)
-        self.t = 0.0
-        self.diff_coef = diff_coef
-
-        self.C = fill(0.0001f0, (ny,nx))
-        self.M = fill(0.01f0, (ny,nx))
-        self.H = fill(0.988f0, (ny,nx))
-        self.J = fill(0.975f0, (ny,nx))
-        self.D = fill(0.003f0, (ny,nx))
-        self.F = fill(0.994f0, (ny,nx))
-        self.XI = fill(0.0001f0, (ny,nx))
-
-        self.Δu = zeros(ny,nx)
-
-        return self
-    end
-end
-
- - - -

Laplacian

-

The finite-difference Laplacian is calculated in-place by a 5-point stencil. The Neumann boundary condition is enforced. Note that we could have also used DiffEqOperators.jl to automate this step.

- - -
-# 5-point stencil
-function laplacian(Δu, u)
-    n1, n2 = size(u)
-
-    # internal nodes
-    for j = 2:n2-1
-        for i = 2:n1-1
-            @inbounds  Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j]
-        end
-    end
-
-    # left/right edges
-    for i = 2:n1-1
-        @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1]
-        @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2]
-    end
-
-    # top/bottom edges
-    for j = 2:n2-1
-        @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j]
-        @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j]
-    end
-
-    # corners
-    @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1]
-    @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1]
-    @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2]
-    @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2]
-end
-
- - -
-laplacian (generic function with 1 method)
-
- - -

The Rush-Larsen Method

-

We use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the IMEX solvers documentation. While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest.

-

The Rush-Larsen method replaces the explicit Euler integration for the gating variables with direct integration. The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs,

-

\[ -\frac{dg}{dt} = \alpha(V) (1 - g) - \beta(V) g -\]

-

where $g$ is a generic gating variable, ranging from 0 to 1, and $\alpha$ and $\beta$ are reaction rates. This equation can be written as,

-

\[ -\frac{dg}{dt} = (g_{\infty} - g) / \tau_g, -\]

-

where $g_\infty$ and $\tau_g$ are

-

\[ -g_{\infty} = \frac{\alpha}{(\alpha + \beta)}, -\]

-

and,

-

\[ -\tau_g = \frac{1}{(\alpha + \beta)}. -\]

-

Assuing that $g_\infty$ and $\tau_g$ are constant for the duration of a single time step ($\Delta{t}$), which is a reasonable assumption for most cardiac models, we can integrate directly to have,

-

\[ -g(t + \Delta{t}) = g_{\infty} - \left(g_{\infty} - g(\Delta{t})\right)\,e^{-\Delta{t}/\tau_g}. -\]

-

This is the Rush-Larsen technique. Note that as $\Delta{t} \rightarrow 0$, this equations morphs into the explicit Euler formula,

-

\[ -g(t + \Delta{t}) = g(t) + \Delta{t}\frac{dg}{dt}. -\]

-

rush_larsen is a helper function that use the Rush-Larsen method to integrate the gating variables.

- - -
-@inline function rush_larsen(g, α, β, Δt)
-    inf = α/(α+β)
-    τ = 1f0 / (α+β)
-    return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0)
-end
-
- - -
-rush_larsen (generic function with 1 method)
-
- - -

The gating variables are updated as below. The details of how to calculate $\alpha$ and $\beta$ are based on the Beeler-Reuter model and not of direct interest to this tutorial.

- - -
-function update_M_cpu(g, v, Δt)
-    # the condition is needed here to prevent NaN when v == 47.0
-    α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0)
-    β = (40.0f0 * exp(-0.056f0*(v+72.0f0)))
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_H_cpu(g, v, Δt)
-    α = 0.126f0 * exp(-0.25f0*(v+77.0f0))
-    β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0)
-   return rush_larsen(g, α, β, Δt)
-end
-
-function update_J_cpu(g, v, Δt)
-    α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0)
-    β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_D_cpu(g, v, Δt)
-    α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0)
-    β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_F_cpu(g, v, Δt)
-    α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0)
-    β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_XI_cpu(g, v, Δt)
-    α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0)
-    β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
- - -
-update_XI_cpu (generic function with 1 method)
-
- - -

The intracelleular calcium is not technically a gating variable, but we can use a similar explicit exponential integrator for it.

- - -
-function update_C_cpu(g, d, f, v, Δt)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g)
-    kCa = C_s * g_s * d * f
-    iCa = kCa * (v - ECa)
-    inf = 1.0f-7 * (0.07f0 - g)
-    τ = 1f0 / 0.07f0
-    return g + (g - inf) * expm1(-Δt/τ)
-end
-
- - -
-update_C_cpu (generic function with 1 method)
-
- - -

Implicit Solver

-

Now, it is time to define the derivative function as an associated function of BeelerReuterCpu. We plan to use the CVODE_BDF solver as our implicit portion. Similar to other iterative methods, it calls the deriv function with the same $t$ multiple times. For example, these are consecutive $t$s from a representative run:

-

0.86830 0.86830 0.85485 0.85485 0.85485 0.86359 0.86359 0.86359 0.87233 0.87233 0.87233 0.88598 ...

-

Here, every time step is called three times. We distinguish between two types of calls to the deriv function. When $t$ changes, the gating variables are updated by calling update_gates_cpu:

- - -
-function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt)
-    let Δt = Float32(Δt)
-        n1, n2 = size(u)
-        for j = 1:n2
-            for i = 1:n1
-                v = Float32(u[i,j])
-
-                XI[i,j] = update_XI_cpu(XI[i,j], v, Δt)
-                M[i,j] = update_M_cpu(M[i,j], v, Δt)
-                H[i,j] = update_H_cpu(H[i,j], v, Δt)
-                J[i,j] = update_J_cpu(J[i,j], v, Δt)
-                D[i,j] = update_D_cpu(D[i,j], v, Δt)
-                F[i,j] = update_F_cpu(F[i,j], v, Δt)
-
-                C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt)
-            end
-        end
-    end
-end
-
- - -
-update_gates_cpu (generic function with 1 method)
-
- - -

On the other hand, du is updated at each time step, since it is independent of $\Delta{t}$.

- - -
-# iK1 is the inward-rectifying potassium current
-function calc_iK1(v)
-    ea = exp(0.04f0*(v+85f0))
-    eb = exp(0.08f0*(v+53f0))
-    ec = exp(0.04f0*(v+53f0))
-    ed = exp(-0.04f0*(v+23f0))
-    return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)
-            + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))
-end
-
-# ix1 is the time-independent background potassium current
-function calc_ix1(v, xi)
-    ea = exp(0.04f0*(v+77f0))
-    eb = exp(0.04f0*(v+35f0))
-    return xi * 0.8f0 * (ea-1f0) / eb
-end
-
-# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)
-function calc_iNa(v, m, h, j)
-    return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)
-end
-
-# iCa is the calcium current
-function calc_iCa(v, d, f, c)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c)    # ECa is the calcium reversal potential
-    return C_s * g_s * d * f * (v - ECa)
-end
-
-function update_du_cpu(du, u, XI, M, H, J, D, F, C)
-    n1, n2 = size(u)
-
-    for j = 1:n2
-        for i = 1:n1
-            v = Float32(u[i,j])
-
-            # calculating individual currents
-            iK1 = calc_iK1(v)
-            ix1 = calc_ix1(v, XI[i,j])
-            iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])
-            iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])
-
-            # total current
-            I_sum = iK1 + ix1 + iNa + iCa
-
-            # the reaction part of the reaction-diffusion equation
-            du[i,j] = -I_sum / C_m
-        end
-    end
-end
-
- - -
-update_du_cpu (generic function with 1 method)
-
- - -

Finally, we put everything together is our deriv function, which is a call on BeelerReuterCpu.

- - -
-function (f::BeelerReuterCpu)(du, u, p, t)
-    Δt = t - f.t
-
-    if Δt != 0 || t == 0
-        update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt)
-        f.t = t
-    end
-
-    laplacian(f.Δu, u)
-
-    # calculate the reaction portion
-    update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C)
-
-    # ...add the diffusion portion
-    du .+= f.diff_coef .* f.Δu
-end
-
- - - -

Results

-

Time to test! We need to define the starting transmembrane potential with the help of global constants v0 and v1, which represent the resting and activated potentials.

- - -
-const N = 192;
-u0 = fill(v0, (N, N));
-u0[90:102,90:102] .= v1;   # a small square in the middle of the domain
-
- - - -

The initial condition is a small square in the middle of the domain.

- - -
-using Plots
-heatmap(u0)
-
- - - - -

Next, the problem is defined:

- - -
-using DifferentialEquations, Sundials
-
-deriv_cpu = BeelerReuterCpu(u0, 1.0);
-prob = ODEProblem(deriv_cpu, u0, (0.0, 50.0));
-
- - - -

For stiff reaction-diffusion equations, CVODE_BDF from Sundial library is an excellent solver.

- - -
-@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);
-
- - -
-31.837224 seconds (6.03 M allocations: 314.845 MiB, 0.37% gc time)
-
- - - -
-heatmap(sol.u[end])
-
- - - - -

CPU/GPU Beeler-Reuter Solver

-

GPUs are great for embarrassingly parallel problems but not so much for highly coupled models. We plan to keep the implicit part on CPU and run the decoupled explicit code on a GPU with the help of the CUDAnative library.

-

GPUs and CUDA

-

It this section, we present a brief summary of how GPUs (specifically NVIDIA GPUs) work and how to program them using the Julia CUDA interface. The readers who are familiar with these basic concepts may skip this section.

-

Let's start by looking at the hardware of a typical high-end GPU, GTX 1080. It has four Graphics Processing Clusters (equivalent to a discrete CPU), each harboring five Streaming Multiprocessor (similar to a CPU core). Each SM has 128 single-precision CUDA cores. Therefore, GTX 1080 has a total of 4 x 5 x 128 = 2560 CUDA cores. The maximum theoretical throughput for a GTX 1080 is reported as 8.87 TFLOPS. This figure is calculated for a boost clock frequency of 1.733 MHz as 2 x 2560 x 1.733 MHz = 8.87 TFLOPS. The factor 2 is included because two single floating point operations, a multiplication and an addition, can be done in a clock cycle as part of a fused-multiply-addition FMA operation. GTX 1080 also has 8192 MB of global memory accessible to all the cores (in addition to local and shared memory on each SM).

-

A typical CUDA application has the following flow:

-
    -
  1. Define and initialize the problem domain tensors (multi-dimensional arrays) in CPU memory.

    -
  2. -
  3. Allocate corresponding tensors in the GPU global memory.

    -
  4. -
  5. Transfer the input tensors from CPU to the corresponding GPU tensors.

    -
  6. -
  7. Invoke CUDA kernels (i.e., the GPU functions callable from CPU) that operate on the GPU tensors.

    -
  8. -
  9. Transfer the result tensors from GPU back to CPU.

    -
  10. -
  11. Process tensors on CPU.

    -
  12. -
  13. Repeat steps 3-6 as needed.

    -
  14. -
-

Some libraries, such as ArrayFire, hide the complexicities of steps 2-5 behind a higher level of abstraction. However, here we take a lower level route. By using CuArray and CUDAnative, we achieve a finer-grained control and higher performance. In return, we need to implement each step manually.

-

CuArray is a thin abstraction layer over the CUDA API and allows us to define GPU-side tensors and copy data to and from them but does not provide for operations on tensors. CUDAnative is a compiler that translates Julia functions designated as CUDA kernels into ptx (a high-level CUDA assembly language).

-

The CUDA Code

-

The key to fast CUDA programs is to minimize CPU/GPU memory transfers and global memory accesses. The implicit solver is currently CPU only, but it only needs access to the transmembrane potential. The rest of state variables reside on the GPU memory.

-

We modify $BeelerReuterCpu$ into $BeelerReuterGpu$ by defining the state variables as CuArrays instead of standard Julia Arrays. The name of each variable defined on GPU is prefixed by d_ for clarity. Note that $\Delta{v}$ is a temporary storage for the Laplacian and stays on the CPU side.

- - -
-using CUDAnative, CuArrays
-
-mutable struct BeelerReuterGpu <: Function
-    t::Float64                  # the last timestep time to calculate Δt
-    diff_coef::Float64          # the diffusion-coefficient (coupling strength)
-
-    d_C::CuArray{Float32, 2}    # intracellular calcium concentration
-    d_M::CuArray{Float32, 2}    # sodium current activation gate (m)
-    d_H::CuArray{Float32, 2}    # sodium current inactivation gate (h)
-    d_J::CuArray{Float32, 2}    # sodium current slow inactivaiton gate (j)
-    d_D::CuArray{Float32, 2}    # calcium current activaiton gate (d)
-    d_F::CuArray{Float32, 2}    # calcium current inactivation gate (f)
-    d_XI::CuArray{Float32, 2}   # inward-rectifying potassium current (iK1)
-
-    d_u::CuArray{Float64, 2}    # place-holder for u in the device memory
-    d_du::CuArray{Float64, 2}   # place-holder for d_u in the device memory
-
-    Δv::Array{Float64, 2}       # place-holder for voltage gradient
-
-    function BeelerReuterGpu(u0, diff_coef)
-        self = new()
-
-        ny, nx = size(u0)
-        @assert (nx % 16 == 0) && (ny % 16 == 0)
-        self.t = 0.0
-        self.diff_coef = diff_coef
-
-        self.d_C = CuArray(fill(0.0001f0, (ny,nx)))
-        self.d_M = CuArray(fill(0.01f0, (ny,nx)))
-        self.d_H = CuArray(fill(0.988f0, (ny,nx)))
-        self.d_J = CuArray(fill(0.975f0, (ny,nx)))
-        self.d_D = CuArray(fill(0.003f0, (ny,nx)))
-        self.d_F = CuArray(fill(0.994f0, (ny,nx)))
-        self.d_XI = CuArray(fill(0.0001f0, (ny,nx)))
-
-        self.d_u = CuArray(u0)
-        self.d_du = CuArray(zeros(ny,nx))
-
-        self.Δv = zeros(ny,nx)
-
-        return self
-    end
-end
-
- - - -

The Laplacian function remains unchanged. The main change to the explicit gating solvers is that exp and expm1 functions are prefixed by CUDAnative.. This is a technical nuisance that will hopefully be resolved in future.

- - -
-function rush_larsen_gpu(g, α, β, Δt)
-    inf = α/(α+β)
-    τ = 1.0/(α+β)
-    return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0)
-end
-
-function update_M_gpu(g, v, Δt)
-    # the condition is needed here to prevent NaN when v == 47.0
-    α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0)
-    β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0)))
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_H_gpu(g, v, Δt)
-    α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0))
-    β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_J_gpu(g, v, Δt)
-    α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0)
-    β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_D_gpu(g, v, Δt)
-    α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0)
-    β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_F_gpu(g, v, Δt)
-    α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0)
-    β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_XI_gpu(g, v, Δt)
-    α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0)
-    β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_C_gpu(c, d, f, v, Δt)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c)
-    kCa = C_s * g_s * d * f
-    iCa = kCa * (v - ECa)
-    inf = 1.0f-7 * (0.07f0 - c)
-    τ = 1f0 / 0.07f0
-    return c + (c - inf) * CUDAnative.expm1(-Δt/τ)
-end
-
- - -
-update_C_gpu (generic function with 1 method)
-
- - -

Similarly, we modify the functions to calculate the individual currents by adding CUDAnative prefix.

- - -
-# iK1 is the inward-rectifying potassium current
-function calc_iK1(v)
-    ea = CUDAnative.exp(0.04f0*(v+85f0))
-    eb = CUDAnative.exp(0.08f0*(v+53f0))
-    ec = CUDAnative.exp(0.04f0*(v+53f0))
-    ed = CUDAnative.exp(-0.04f0*(v+23f0))
-    return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)
-            + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))
-end
-
-# ix1 is the time-independent background potassium current
-function calc_ix1(v, xi)
-    ea = CUDAnative.exp(0.04f0*(v+77f0))
-    eb = CUDAnative.exp(0.04f0*(v+35f0))
-    return xi * 0.8f0 * (ea-1f0) / eb
-end
-
-# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)
-function calc_iNa(v, m, h, j)
-    return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)
-end
-
-# iCa is the calcium current
-function calc_iCa(v, d, f, c)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c)    # ECa is the calcium reversal potential
-    return C_s * g_s * d * f * (v - ECa)
-end
-
- - -
-calc_iCa (generic function with 1 method)
-
- - -

CUDA Kernels

-

A CUDA program does not directly deal with GPCs and SMs. The logical view of a CUDA program is in the term of blocks and threads. We have to specify the number of block and threads when running a CUDA kernel. Each thread runs on a single CUDA core. Threads are logically bundled into blocks, which are in turn specified on a grid. The grid stands for the entirety of the domain of interest.

-

Each thread can find its logical coordinate by using few pre-defined indexing variables (threadIdx, blockIdx, blockDim and gridDim) in C/C++ and the corresponding functions (e.g., threadIdx()) in Julia. There variables and functions are defined automatically for each thread and may return a different value depending on the calling thread. The return value of these functions is a 1, 2, or 3 dimensional structure whose elements can be accessed as .x, .y, and .z (for a 1-dimensional case, .x reports the actual index and .y and .z simply return 1). For example, if we deploy a kernel in 128 blocks and with 256 threads per block, each thread will see

-
    gridDim.x = 128;
-    blockDim=256;
-

while blockIdx.x ranges from 0 to 127 in C/C++ and 1 to 128 in Julia. Similarly, threadIdx.x will be between 0 to 255 in C/C++ (of course, in Julia the range will be 1 to 256).

-

A C/C++ thread can calculate its index as

-
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
-

In Julia, we have to take into account base 1. Therefore, we use the following formula

-
    idx = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x
-

A CUDA programmer is free to interpret the calculated index however it fits the application, but in practice, it is usually interpreted as an index into input tensors.

-

In the GPU version of the solver, each thread works on a single element of the medium, indexed by a (x,y) pair. update_gates_gpu and update_du_gpu are very similar to their CPU counterparts but are in fact CUDA kernels where the for loops are replaced with CUDA specific indexing. Note that CUDA kernels cannot return a valve; hence, nothing at the end.

- - -
-function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt)
-    i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x
-    j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y
-
-    v = Float32(u[i,j])
-
-    let Δt = Float32(Δt)
-        XI[i,j] = update_XI_gpu(XI[i,j], v, Δt)
-        M[i,j] = update_M_gpu(M[i,j], v, Δt)
-        H[i,j] = update_H_gpu(H[i,j], v, Δt)
-        J[i,j] = update_J_gpu(J[i,j], v, Δt)
-        D[i,j] = update_D_gpu(D[i,j], v, Δt)
-        F[i,j] = update_F_gpu(F[i,j], v, Δt)
-
-        C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt)
-    end
-    nothing
-end
-
-function update_du_gpu(du, u, XI, M, H, J, D, F, C)
-    i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x
-    j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y
-
-    v = Float32(u[i,j])
-
-    # calculating individual currents
-    iK1 = calc_iK1(v)
-    ix1 = calc_ix1(v, XI[i,j])
-    iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])
-    iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])
-
-    # total current
-    I_sum = iK1 + ix1 + iNa + iCa
-
-    # the reaction part of the reaction-diffusion equation
-    du[i,j] = -I_sum / C_m
-    nothing
-end
-
- - -
-update_du_gpu (generic function with 1 method)
-
- - -

Implicit Solver

-

Finally, the deriv function is modified to copy u to GPU and copy du back and to invoke CUDA kernels.

- - -
-function (f::BeelerReuterGpu)(du, u, p, t)
-    L = 16   # block size
-    Δt = t - f.t
-    copyto!(f.d_u, u)
-    ny, nx = size(u)
-
-    if Δt != 0 || t == 0
-        @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu(
-            f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt)
-        f.t = t
-    end
-
-    laplacian(f.Δv, u)
-
-    # calculate the reaction portion
-    @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu(
-        f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C)
-
-    copyto!(du, f.d_du)
-
-    # ...add the diffusion portion
-    du .+= f.diff_coef .* f.Δv
-end
-
- - - -

Ready to test!

- - -
-using DifferentialEquations, Sundials
-
-deriv_gpu = BeelerReuterGpu(u0, 1.0);
-prob = ODEProblem(deriv_gpu, u0, (0.0, 50.0));
-@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);
-
- - -
-12.468956 seconds (13.13 M allocations: 2.201 GiB, 4.65% gc time)
-
- - - -
-heatmap(sol.u[end])
-
- - - - -

Summary

-

We achieve around a 6x speedup with running the explicit portion of our IMEX solver on a GPU. The major bottleneck of this technique is the communication between CPU and GPU. In its current form, not all of the internals of the method utilize GPU acceleration. In particular, the implicit equations solved by GMRES are performed on the CPU. This partial CPU nature also increases the amount of data transfer that is required between the GPU and CPU (performed every f call). Compiling the full ODE solver to the GPU would solve both of these issues and potentially give a much larger speedup. JuliaDiffEq developers are currently working on solutions to alleviate these issues, but these will only be compatible with native Julia solvers (and not Sundials).

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("advanced","beeler_reuter.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/callbacks_and_events.html b/html/introduction/callbacks_and_events.html deleted file mode 100644 index 281e92a0..00000000 --- a/html/introduction/callbacks_and_events.html +++ /dev/null @@ -1,1337 +0,0 @@ - - - - - - Callbacks and Events - - - - - - - - - - - - - - - - - -
-
-
- -
-

Callbacks and Events

-
Chris Rackauckas
- -
- -

In working with a differential equation, our system will evolve through many states. Particular states of the system may be of interest to us, and we say that an ***"event"*** is triggered when our system reaches these states. For example, events may include the moment when our system reaches a particular temperature or velocity. We ***handle*** these events with ***callbacks***, which tell us what to do once an event has been triggered.

-

These callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserve conservation laws and save the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers.

-

This tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the Event Handling and Callback Functions page of the documentation. We will also introduce you to some of the most widely used callbacks in the Callback Library, which is a library of pre-built mods.

-

Events and Continuous Callbacks

-

Event handling is done through continuous callbacks. Callbacks take a function, condition, which triggers an affect! when condition == 0. These callbacks are called "continuous" because they will utilize rootfinding on the interpolation to find the "exact" time point at which the condition takes place and apply the affect! at that time point.

-

***Let's use a bouncing ball as a simple system to explain events and callbacks.*** Let's take Newton's model of a ball falling towards the Earth's surface via a gravitational constant g. In this case, the velocity is changing via -g, and position is changing via the velocity. Therefore we receive the system of ODEs:

- - -
-using DifferentialEquations, ParameterizedFunctions
-ball! = @ode_def BallBounce begin
-  dy =  v
-  dv = -g
-end g
-
- - -
-(::Main.WeaveSandBox2.BallBounce{getfield(Main.WeaveSandBox2, Symbol("##1#5
-")),getfield(Main.WeaveSandBox2, Symbol("##2#6")),getfield(Main.WeaveSandBo
-x2, Symbol("##3#7")),Nothing,Nothing,getfield(Main.WeaveSandBox2, Symbol("#
-#4#8")),Expr,Expr}) (generic function with 2 methods)
-
- - -

We want the callback to trigger when y=0 since that's when the ball will hit the Earth's surface (our event). We do this with the condition:

- - -
-function condition(u,t,integrator)
-  u[1]
-end
-
- - -
-condition (generic function with 1 method)
-
- - -

Recall that the condition will trigger when it evaluates to zero, and here it will evaluate to zero when u[1] == 0, which occurs when v == 0. Now we have to say what we want the callback to do. Callbacks make use of the Integrator Interface. Instead of giving a full description, a quick and usable rundown is:

-
    -
  • Values are strored in integrator.u

    -
  • -
  • Times are stored in integrator.t

    -
  • -
  • The parameters are stored in integrator.p

    -
  • -
  • integrator(t) performs an interpolation in the current interval between integrator.tprev and integrator.t (and allows extrapolation)

    -
  • -
  • User-defined options (tolerances, etc.) are stored in integrator.opts

    -
  • -
  • integrator.sol is the current solution object. Note that integrator.sol.prob is the current problem

    -
  • -
-

While there's a lot more on the integrator interface page, that's a working knowledge of what's there.

-

What we want to do with our affect! is to "make the ball bounce". Mathematically speaking, the ball bounces when the sign of the velocity flips. As an added behavior, let's also use a small friction constant to dampen the ball's velocity. This way only a percentage of the velocity will be retained when the event is triggered and the callback is used. We'll define this behavior in the affect! function:

- - -
-function affect!(integrator)
-    integrator.u[2] = -integrator.p[2] * integrator.u[2]
-end
-
- - -
-affect! (generic function with 1 method)
-
- - -

integrator.u[2] is the second value of our model, which is v or velocity, and integrator.p[2], is our friction coefficient.

-

Therefore affect! can be read as follows: affect! will take the current value of velocity, and multiply it -1 multiplied by our friction coefficient. Therefore the ball will change direction and its velocity will dampen when affect! is called.

-

Now let's build the ContinuousCallback:

- - -
-bounce_cb = ContinuousCallback(condition,affect!)
-
- - -
-DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox2.condition),typeof(M
-ain.WeaveSandBox2.affect!),typeof(Main.WeaveSandBox2.affect!),typeof(DiffEq
-Base.INITIALIZE_DEFAULT),Float64,Int64,Nothing}(Main.WeaveSandBox2.conditio
-n, Main.WeaveSandBox2.affect!, Main.WeaveSandBox2.affect!, DiffEqBase.INITI
-ALIZE_DEFAULT, nothing, true, 10, Bool[true, true], 2.220446049250313e-15, 
-0)
-
- - -

Now let's make an ODEProblem which has our callback:

- - -
-u0 = [50.0,0.0]
-tspan = (0.0,15.0)
-p = (9.8,0.9)
-prob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 15.0)
-u0: [50.0, 0.0]
-
- - -

Notice that we chose a friction constant of 0.9. Now we can solve the problem and plot the solution as we normally would:

- - -
-sol = solve(prob,Tsit5())
-using Plots; gr()
-plot(sol)
-
- - - - -

and tada, the ball bounces! Notice that the ContinuousCallback is using the interpolation to apply the effect "exactly" when v == 0. This is crucial for model correctness, and thus when this property is needed a ContinuousCallback should be used.

-

Exercise 1

-

In our example we used a constant coefficient of friction, but if we are bouncing the ball in the same place we may be smoothing the surface (say, squishing the grass), causing there to be less friction after each bounce. In this more advanced model, we want the friction coefficient at the next bounce to be sqrt(friction) from the previous bounce (since friction < 1, sqrt(friction) > friction and sqrt(friction) < 1).

-

Hint: there are many ways to implement this. One way to do it is to make p a Vector and mutate the friction coefficient in the affect!.

-

Discrete Callbacks

-

A discrete callback checks a condition after every integration step and, if true, it will apply an affect!. For example, let's say that at time t=2 we want to include that a kid kicked the ball, adding 20 to the current velocity. This kind of situation, where we want to add a specific behavior which does not require rootfinding, is a good candidate for a DiscreteCallback. In this case, the condition is a boolean for whether to apply the affect!, so:

- - -
-function condition_kick(u,t,integrator)
-    t == 2
-end
-
- - -
-condition_kick (generic function with 1 method)
-
- - -

We want the kick to occur at t=2, so we check for that time point. When we are at this time point, we want to do:

- - -
-function affect_kick!(integrator)
-    integrator.u[2] += 50
-end
-
- - -
-affect_kick! (generic function with 1 method)
-
- - -

Now we build the problem as before:

- - -
-kick_cb = DiscreteCallback(condition_kick,affect_kick!)
-u0 = [50.0,0.0]
-tspan = (0.0,10.0)
-p = (9.8,0.9)
-prob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [50.0, 0.0]
-
- - -

Note that, since we are requiring our effect at exactly the time t=2, we need to tell the integration scheme to step at exactly t=2 to apply this callback. This is done via the option tstops, which is like saveat but means "stop at these values".

- - -
-sol = solve(prob,Tsit5(),tstops=[2.0])
-plot(sol)
-
- - - - -

Note that this example could've been done with a ContinuousCallback by checking the condition t-2.

-

Merging Callbacks with Callback Sets

-

In some cases you may want to merge callbacks to build up more complex behavior. In our previous result, notice that the model is unphysical because the ball goes below zero! What we really need to do is add the bounce callback together with the kick. This can be achieved through the CallbackSet.

- - -
-cb = CallbackSet(bounce_cb,kick_cb)
-
- - -
-DiffEqBase.CallbackSet{Tuple{DiffEqBase.ContinuousCallback{typeof(Main.Weav
-eSandBox2.condition),typeof(Main.WeaveSandBox2.affect!),typeof(Main.WeaveSa
-ndBox2.affect!),typeof(DiffEqBase.INITIALIZE_DEFAULT),Float64,Int64,Nothing
-}},Tuple{DiffEqBase.DiscreteCallback{typeof(Main.WeaveSandBox2.condition_ki
-ck),typeof(Main.WeaveSandBox2.affect_kick!),typeof(DiffEqBase.INITIALIZE_DE
-FAULT)}}}((DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox2.conditio
-n),typeof(Main.WeaveSandBox2.affect!),typeof(Main.WeaveSandBox2.affect!),ty
-peof(DiffEqBase.INITIALIZE_DEFAULT),Float64,Int64,Nothing}(Main.WeaveSandBo
-x2.condition, Main.WeaveSandBox2.affect!, Main.WeaveSandBox2.affect!, DiffE
-qBase.INITIALIZE_DEFAULT, nothing, true, 10, Bool[true, true], 2.2204460492
-50313e-15, 0),), (DiffEqBase.DiscreteCallback{typeof(Main.WeaveSandBox2.con
-dition_kick),typeof(Main.WeaveSandBox2.affect_kick!),typeof(DiffEqBase.INIT
-IALIZE_DEFAULT)}(Main.WeaveSandBox2.condition_kick, Main.WeaveSandBox2.affe
-ct_kick!, DiffEqBase.INITIALIZE_DEFAULT, Bool[true, true]),))
-
- - -

A CallbackSet merges their behavior together. The logic is as follows. In a given interval, if there are multiple continuous callbacks that would trigger, only the one that triggers at the earliest time is used. The time is pulled back to where that continuous callback is triggered, and then the DiscreteCallbacks in the callback set are called in order.

- - -
-u0 = [50.0,0.0]
-tspan = (0.0,15.0)
-p = (9.8,0.9)
-prob = ODEProblem(ball!,u0,tspan,p,callback=cb)
-sol = solve(prob,Tsit5(),tstops=[2.0])
-plot(sol)
-
- - - - -

Notice that we have now merged the behaviors. We can then nest this as deep as we like.

-

Exercise 2

-

Add to the model a linear wind with resistance that changes the acceleration to -g + k*v after t=10. Do so by adding another parameter and allowing it to be zero until a specific time point where a third callback triggers the change.

-

Integration Termination and Directional Handling

-

Let's look at another model now: the model of the Harmonic Oscillator. We can write this as:

- - -
-u0 = [1.,0.]
-harmonic! = @ode_def HarmonicOscillator begin
-   dv = -x
-   dx = v
-end
-tspan = (0.0,10.0)
-prob = ODEProblem(harmonic!,u0,tspan)
-sol = solve(prob)
-plot(sol)
-
- - - - -

Let's instead stop the integration when a condition is met. From the Integrator Interface stepping controls we see that terminate!(integrator) will cause the integration to end. So our new affect! is simply:

- - -
-function terminate_affect!(integrator)
-    terminate!(integrator)
-end
-
- - -
-terminate_affect! (generic function with 1 method)
-
- - -

Let's first stop the integration when the particle moves back to x=0. This means we want to use the condition:

- - -
-function terminate_condition(u,t,integrator)
-    u[2]
-end
-terminate_cb = ContinuousCallback(terminate_condition,terminate_affect!)
-
- - -
-DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox2.terminate_condition
-),typeof(Main.WeaveSandBox2.terminate_affect!),typeof(Main.WeaveSandBox2.te
-rminate_affect!),typeof(DiffEqBase.INITIALIZE_DEFAULT),Float64,Int64,Nothin
-g}(Main.WeaveSandBox2.terminate_condition, Main.WeaveSandBox2.terminate_aff
-ect!, Main.WeaveSandBox2.terminate_affect!, DiffEqBase.INITIALIZE_DEFAULT, 
-nothing, true, 10, Bool[true, true], 2.220446049250313e-15, 0)
-
- - -

Note that instead of adding callbacks to the problem, we can also add them to the solve command. This will automatically form a CallbackSet with any problem-related callbacks and naturally allows you to distinguish between model features and integration controls.

- - -
-sol = solve(prob,callback=terminate_cb)
-plot(sol)
-
- - - - -

Notice that the harmonic oscilator's true solution here is sin and cosine, and thus we would expect this return to zero to happen at t=π:

- - -
-sol.t[end]
-
- - -
-3.1415902498226123
-
- - -

This is one way to approximate π! Lower tolerances and arbitrary precision numbers can make this more exact, but let's not look at that. Instead, what if we wanted to halt the integration after exactly one cycle? To do so we would need to ignore the first zero-crossing. Luckily in these types of scenarios there's usually a structure to the problem that can be exploited. Here, we only want to trigger the affect! when crossing from positive to negative, and not when crossing from negative to positive. In other words, we want our affect! to only occur on upcrossings.

-

If the ContinuousCallback constructor is given a single affect!, it will occur on both upcrossings and downcrossings. If there are two affect!s given, then the first is for upcrossings and the second is for downcrossings. An affect! can be ignored by using nothing. Together, the "upcrossing-only" version of the effect means that the first affect! is what we defined above and the second is nothing. Therefore we want:

- - -
-terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing)
-
- - -
-DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox2.terminate_condition
-),typeof(Main.WeaveSandBox2.terminate_affect!),Nothing,typeof(DiffEqBase.IN
-ITIALIZE_DEFAULT),Float64,Int64,Nothing}(Main.WeaveSandBox2.terminate_condi
-tion, Main.WeaveSandBox2.terminate_affect!, nothing, DiffEqBase.INITIALIZE_
-DEFAULT, nothing, true, 10, Bool[true, true], 2.220446049250313e-15, 0)
-
- - -

Which gives us:

- - -
-sol = solve(prob,callback=terminate_upcrossing_cb)
-plot(sol)
-
- - - - -

Callback Library

-

As you can see, callbacks can be very useful and through CallbackSets we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the Callback Library. We will walk through a few examples where these callbacks can come in handy.

-

Manifold Projection

-

One callback is the manifold projection callback. Essentially, you can define any manifold g(sol)=0 which the solution must live on, and cause the integration to project to that manifold after every step. As an example, let's see what happens if we naively run the harmonic oscillator for a long time:

- - -
-tspan = (0.0,10000.0)
-prob = ODEProblem(harmonic!,u0,tspan)
-sol = solve(prob)
-gr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points!
-plot(sol,vars=(1,2))
-
- - - - - -
-plot(sol,vars=(0,1),denseplot=false)
-
- - - - -

Notice that what's going on is that the numerical solution is drifting from the true solution over this long time scale. This is because the integrator is not conserving energy.

- - -
-plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2
-
- - - - -

Some integration techniques like symplectic integrators are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is:

- - -
-function g(resid,u,p,t)
-  resid[1] = u[2]^2 + u[1]^2 - 1
-  resid[2] = 0
-end
-
- - -
-g (generic function with 1 method)
-
- - -

Here the residual measures how far from our desired energy we are, and the number of conditions matches the size of our system (we ignored the second one by making the residual 0). Thus we define a ManifoldProjection callback and add that to the solver:

- - -
-cb = ManifoldProjection(g)
-sol = solve(prob,callback=cb)
-plot(sol,vars=(1,2))
-
- - - - - -
-plot(sol,vars=(0,1),denseplot=false)
-
- - - - -

Now we have "perfect" energy conservation, where if it's ever violated too much the solution will get projected back to energy=1.

- - -
-u1,u2 = sol[500]
-u2^2 + u1^2
-
- - -
-1.0000425845647514
-
- - -

While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the PositiveCallback() which can be used to enforce positivity of the variables.

-

SavingCallback

-

The SavingCallback can be used to allow for special saving behavior. Let's take a linear ODE define on a system of 1000x1000 matrices:

- - -
-prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0))
-
- - -
-ODEProblem with uType Array{Float64,2} and tType Float64. In-place: true
-timespan: (0.0, 1.0)
-u0: [0.0743618 0.0514087 … 0.601962 0.923609; 0.98813 0.400411 … 0.0804066 
-0.280282; … ; 0.894011 0.961291 … 0.489454 0.350763; 0.972432 0.558653 … 0.
-67446 0.516072]
-
- - -

In fields like quantum mechanics you may only want to know specific properties of the solution such as the trace or the norm of the matrix. Saving all of the 1000x1000 matrices can be a costly way to get this information! Instead, we can use the SavingCallback to save the trace and norm at specified times. To do so, we first define our SavedValues cache. Our time is in terms of Float64, and we want to save tuples of Float64s (one for the trace and one for the norm), and thus we generate the cache as:

- - -
-saved_values = SavedValues(Float64, Tuple{Float64,Float64})
-
- - -
-SavedValues{tType=Float64, savevalType=Tuple{Float64,Float64}}
-t:
-Float64[]
-saveval:
-Tuple{Float64,Float64}[]
-
- - -

Now we define the SavingCallback by giving it a function of (u,p,t,integrator) that returns the values to save, and the cache:

- - -
-using LinearAlgebra
-cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values)
-
- - -
-DiffEqBase.DiscreteCallback{getfield(DiffEqCallbacks, Symbol("##28#29")),Di
-ffEqCallbacks.SavingAffect{getfield(Main.WeaveSandBox2, Symbol("##21#22")),
-Float64,Tuple{Float64,Float64},DataStructures.BinaryHeap{Float64,DataStruct
-ures.LessThan},Array{Float64,1}},typeof(DiffEqCallbacks.saving_initialize)}
-(getfield(DiffEqCallbacks, Symbol("##28#29"))(), DiffEqCallbacks.SavingAffe
-ct{getfield(Main.WeaveSandBox2, Symbol("##21#22")),Float64,Tuple{Float64,Fl
-oat64},DataStructures.BinaryHeap{Float64,DataStructures.LessThan},Array{Flo
-at64,1}}(getfield(Main.WeaveSandBox2, Symbol("##21#22"))(), SavedValues{tTy
-pe=Float64, savevalType=Tuple{Float64,Float64}}
-t:
-Float64[]
-saveval:
-Tuple{Float64,Float64}[], DataStructures.BinaryHeap{Float64,DataStructures.
-LessThan}(DataStructures.LessThan(), Float64[]), Float64[], true, true, 0),
- DiffEqCallbacks.saving_initialize, Bool[false, false])
-
- - -

Here we take u and save (tr(u),norm(u)). When we solve with this callback:

- - -
-sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 0-element Array{Float64,1}
-u: 0-element Array{Array{Float64,2},1}
-
- - -

Our values are stored in our saved_values variable:

- - -
-saved_values.t
-
- - -
-5-element Array{Float64,1}:
- 0.0                
- 0.10012874568410042
- 0.3483893698204804 
- 0.6837344825957224 
- 1.0
-
- - - -
-saved_values.saveval
-
- - -
-5-element Array{Tuple{Float64,Float64},1}:
- (495.44156991744, 577.2756530262762)    
- (547.6181136204315, 638.0704069743907)  
- (701.9335867834924, 817.8747894712187)  
- (981.5999868986812, 1143.7348172902548) 
- (1346.7497573793933, 1569.1978486660328)
-
- - -

By default this happened only at the solver's steps. But the SavingCallback has similar controls as the integrator. For example, if we want to save at every 0.1 seconds, we do can so using saveat:

- - -
-saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache
-cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0)
-sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 0-element Array{Float64,1}
-u: 0-element Array{Array{Float64,2},1}
-
- - - -
-saved_values.t
-
- - -
-11-element Array{Float64,1}:
- 0.0
- 0.1
- 0.2
- 0.3
- 0.4
- 0.5
- 0.6
- 0.7
- 0.8
- 0.9
- 1.0
-
- - - -
-saved_values.saveval
-
- - -
-11-element Array{Tuple{Float64,Float64},1}:
- (495.44156991744, 577.2756530262762)    
- (547.54761469005, 637.9882634512298)    
- (605.1337216268632, 705.086100018979)   
- (668.7761265246788, 779.2405777178761)  
- (739.1121045789197, 861.1942330586455)  
- (816.8450126888607, 951.7665992375188)  
- (902.7530484925851, 1051.8644119363264) 
- (997.6968024681119, 1162.490408834653)  
- (1102.6255792832562, 1284.750695082544) 
- (1218.5892351062835, 1419.868535827534) 
- (1346.7497573793933, 1569.1978486660328)
-
- - -

Exercise 3

-

Go back to the Harmonic oscillator. Use the SavingCallback to save an array for the energy over time, and do this both with and without the ManifoldProjection. Plot the results to see the difference the projection makes.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","callbacks_and_events.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/choosing_algs.html b/html/introduction/choosing_algs.html deleted file mode 100644 index ff8bf277..00000000 --- a/html/introduction/choosing_algs.html +++ /dev/null @@ -1,1047 +0,0 @@ - - - - - - Choosing an ODE Algorithm - - - - - - - - - - - - - - - - - -
-
-
- -
-

Choosing an ODE Algorithm

-
Chris Rackauckas
- -
- -

While the default algorithms, along with alg_hints = [:stiff], will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the ODE Solvers page which goes into more depth.

-

Diagnosing Stiffness

-

One of the key things to know for algorithm choices is whether your problem is stiff. Let's take for example the driven Van Der Pol equation:

- - -
-using DifferentialEquations, ParameterizedFunctions
-van! = @ode_def VanDerPol begin
-  dy = μ*((1-x^2)*y - x)
-  dx = 1*y
-end μ
-
-prob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 6.3)
-u0: [0.0, 2.0]
-
- - -

One indicating factor that should alert you to the fact that this model may be stiff is the fact that the parameter is 1e6: large parameters generally mean stiff models. If we try to solve this with the default method:

- - -
-sol = solve(prob,Tsit5())
-
- - -
-retcode: MaxIters
-Interpolation: specialized 4th order "free" interpolation
-t: 999978-element Array{Float64,1}:
- 0.0                  
- 4.997501249375313e-10
- 5.4972513743128435e-9
- 3.289910508569359e-8 
- 9.055579963360815e-8 
- 1.7309488667844897e-7
- 2.793755093774501e-7 
- 4.1495265736211405e-7
- 5.807909425207012e-7 
- 7.812799072271413e-7 
- ⋮                    
- 1.8458642276751214   
- 1.8458657169793589   
- 1.8458672062862802   
- 1.8458686955958856   
- 1.8458701849081751   
- 1.8458716742231487   
- 1.8458731635408063   
- 1.845874652861148    
- 1.8458761421841738   
-u: 999978-element Array{Array{Float64,1},1}:
- [0.0, 2.0]          
- [-0.000998751, 2.0] 
- [-0.0109043, 2.0]   
- [-0.0626554, 2.0]   
- [-0.158595, 2.0]    
- [-0.270036, 2.0]    
- [-0.37832, 2.0]     
- [-0.474679, 2.0]    
- [-0.54993, 2.0]     
- [-0.602693, 2.0]    
- ⋮                   
- [-0.777554, 1.83158]
- [-0.777555, 1.83158]
- [-0.777556, 1.83158]
- [-0.777557, 1.83158]
- [-0.777558, 1.83158]
- [-0.777559, 1.83158]
- [-0.77756, 1.83157] 
- [-0.777561, 1.83157]
- [-0.777562, 1.83157]
-
- - -

Here it shows that maximum iterations were reached. Another thing that can happen is that the solution can return that the solver was unstable (exploded to infinity) or that dt became too small. If these happen, the first thing to do is to check that your model is correct. It could very well be that you made an error that causes the model to be unstable!

-

If the model is the problem, then stiffness could be the reason. We can thus hint to the solver to use an appropriate method:

- - -
-sol = solve(prob,alg_hints = [:stiff])
-
- - -
-retcode: Success
-Interpolation: specialized 3rd order "free" stiffness-aware interpolation
-t: 698-element Array{Float64,1}:
- 0.0                  
- 4.997501249375313e-10
- 5.454146363961655e-9 
- 1.895430203585047e-8 
- 4.1496576804042804e-8
- 7.308070493248608e-8 
- 1.1714620297820683e-7
- 1.7481247411127233e-7
- 2.4862286700158605e-7
- 3.402538577740189e-7 
- ⋮                    
- 5.7398178602086185   
- 5.799476609476999    
- 5.872749556131411    
- 5.953775882573488    
- 6.040228149554932    
- 6.12689077908994     
- 6.213553408624947    
- 6.28666936651838     
- 6.3                  
-u: 698-element Array{Array{Float64,1},1}:
- [0.0, 2.0]          
- [-0.000998751, 2.0] 
- [-0.0108195, 2.0]   
- [-0.0368509, 2.0]   
- [-0.0780351, 2.0]   
- [-0.131248, 2.0]    
- [-0.19755, 2.0]     
- [-0.272074, 2.0]    
- [-0.350452, 2.0]    
- [-0.426453, 2.0]    
- ⋮                   
- [0.702771, -1.93874]
- [0.73069, -1.896]   
- [0.770492, -1.84104]
- [0.823974, -1.77652]
- [0.896988, -1.70228]
- [0.996615, -1.62047]
- [1.14431, -1.52816] 
- [1.34708, -1.43771] 
- [1.39879, -1.41942]
-
- - -

Or we can use the default algorithm. By default, DifferentialEquations.jl uses algorithms like AutoTsit5(Rodas5()) which automatically detect stiffness and switch to an appropriate method once stiffness is known.

- - -
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 1927-element Array{Float64,1}:
- 0.0                  
- 4.997501249375313e-10
- 5.4972513743128435e-9
- 3.289910508569359e-8 
- 9.055579963360815e-8 
- 1.7309488667844897e-7
- 2.793755093774501e-7 
- 4.1495265736211405e-7
- 5.807909425207012e-7 
- 7.812799072271413e-7 
- ⋮                    
- 6.204647119147191    
- 6.219555078801004    
- 6.233840698784528    
- 6.247503396702821    
- 6.26054616845715     
- 6.272975180407388    
- 6.284799377914966    
- 6.296030113262939    
- 6.3                  
-u: 1927-element Array{Array{Float64,1},1}:
- [0.0, 2.0]         
- [-0.000998751, 2.0]
- [-0.0109043, 2.0]  
- [-0.0626554, 2.0]  
- [-0.158595, 2.0]   
- [-0.270036, 2.0]   
- [-0.37832, 2.0]    
- [-0.474679, 2.0]   
- [-0.54993, 2.0]    
- [-0.602693, 2.0]   
- ⋮                  
- [1.11731, -1.54298]
- [1.14817, -1.5261] 
- [1.1805, -1.50946] 
- [1.21435, -1.49311]
- [1.24979, -1.47704]
- [1.28689, -1.46128]
- [1.3257, -1.44583] 
- [1.36632, -1.43072]
- [1.38188, -1.42526]
-
- - -

Another way to understand stiffness is to look at the solution.

- - -
-using Plots; gr()
-sol = solve(prob,alg_hints = [:stiff],reltol=1e-6)
-plot(sol,denseplot=false)
-
- - - - -

Let's zoom in on the y-axis to see what's going on:

- - -
-plot(sol,ylims = (-10.0,10.0))
-
- - - - -

Notice how there are some extreme vertical shifts that occur. These vertical shifts are places where the derivative term is very large, and this is indicative of stiffness. This is an extreme example to highlight the behavior, but this general idea can be carried over to your problem. When in doubt, simply try timing using both a stiff solver and a non-stiff solver and see which is more efficient.

-

To try this out, let's use BenchmarkTools, a package that let's us relatively reliably time code blocks.

- - -
-function lorenz!(du,u,p,t)
-    σ,ρ,β = p
-    du[1] = σ*(u[2]-u[1])
-    du[2] = u[1]*(ρ-u[3]) - u[2]
-    du[3] = u[1]*u[2] - β*u[3]
-end
-u0 = [1.0,0.0,0.0]
-p = (10,28,8/3)
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz!,u0,tspan,p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100.0)
-u0: [1.0, 0.0, 0.0]
-
- - -

And now, let's use the @btime macro from benchmark tools to compare the use of non-stiff and stiff solvers on this problem.

- - -
-using BenchmarkTools
-@btime solve(prob);
-
- - -
-841.600 μs (13053 allocations: 1.41 MiB)
-
- - - -
-@btime solve(prob,alg_hints = [:stiff]);
-
- - -
-4.901 ms (17261 allocations: 1.52 MiB)
-
- - -

In this particular case, we can see that non-stiff solvers get us to the solution much more quickly.

-

The Recommended Methods

-

When picking a method, the general rules are as follows:

-
    -
  • Higher order is more efficient at lower tolerances, lower order is more efficient at higher tolerances

    -
  • -
  • Adaptivity is essential in most real-world scenarios

    -
  • -
  • Runge-Kutta methods do well with non-stiff equations, Rosenbrock methods do well with small stiff equations, BDF methods do well with large stiff equations

    -
  • -
-

While there are always exceptions to the rule, those are good guiding principles. Based on those, a simple way to choose methods is:

-
    -
  • The default is Tsit5(), a non-stiff Runge-Kutta method of Order 5

    -
  • -
  • If you use low tolerances (1e-8), try Vern7() or Vern9()

    -
  • -
  • If you use high tolerances, try BS3()

    -
  • -
  • If the problem is stiff, try Rosenbrock23(), Rodas5(), or CVODE_BDF()

    -
  • -
  • If you don't know, use AutoTsit5(Rosenbrock23()) or AutoVern9(Rodas5()).

    -
  • -
-

(This is a simplified version of the default algorithm chooser)

-

Comparison to other Software

-

If you are familiar with MATLAB, SciPy, or R's DESolve, here's a quick translation start to have transfer your knowledge over.

-
    -
  • ode23 -> BS3()

    -
  • -
  • ode45/dopri5 -> DP5(), though in most cases Tsit5() is more efficient

    -
  • -
  • ode23s -> Rosenbrock23(), though in most cases Rodas4() is more efficient

    -
  • -
  • ode113 -> VCABM(), though in many cases Vern7() is more efficient

    -
  • -
  • dop853 -> DP8(), though in most cases Vern7() is more efficient

    -
  • -
  • ode15s/vode -> QNDF(), though in many cases CVODE_BDF(), Rodas4() or radau() are more efficient

    -
  • -
  • ode23t -> Trapezoid() for efficiency and GenericTrapezoid() for robustness

    -
  • -
  • ode23tb -> TRBDF2

    -
  • -
  • lsoda -> lsoda() (requires ]add LSODA; using LSODA)

    -
  • -
  • ode15i -> IDA(), though in many cases Rodas4() can handle the DAE and is significantly more efficient

    -
  • -
- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","choosing_algs.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/formatting_plots.html b/html/introduction/formatting_plots.html deleted file mode 100644 index a43f8d55..00000000 --- a/html/introduction/formatting_plots.html +++ /dev/null @@ -1,925 +0,0 @@ - - - - - - Formatting Plots - - - - - - - - - - - - - - - - - -
-
-
- -
-

Formatting Plots

-
Chris Rackauckas
- -
- -

Since the plotting functionality is implemented as a recipe to Plots.jl, all of the options open to Plots.jl can be used in our plots. In addition, there are special features specifically for differential equation plots. This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use the Lorenz system of ordinary differential equations. As with all commands in DifferentialEquations.jl, we get a plot of the solution by calling solve on the problem, and plot on the solution:

- - -
-using DifferentialEquations, Plots, ParameterizedFunctions
-gr()
-lorenz = @ode_def Lorenz begin
-  dx = σ*(y-x)
-  dy = ρ*x-y-x*z
-  dz = x*y-β*z
-end σ β ρ
-
-p = [10.0,8/3,28]
-u0 = [1., 5., 10.]
-tspan = (0., 100.)
-prob = ODEProblem(lorenz, u0, tspan, p)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 1360-element Array{Float64,1}:
-   0.0                
-   0.0354861341350177 
-   0.0606639441609802 
-   0.10188862127423248
-   0.1448494744943986 
-   0.19835643663680771
-   0.25049906268405814
-   0.3056767768178228 
-   0.354528003497134  
-   0.4077097758394896 
-   ⋮                  
-  99.45257848856423   
-  99.5206415332703    
-  99.59156525518577   
-  99.65749274487298   
-  99.7343767284518    
-  99.80001907955017   
-  99.87708602888114   
-  99.96443435623007   
- 100.0                
-u: 1360-element Array{Array{Float64,1},1}:
- [1.0, 5.0, 10.0]            
- [2.31565, 5.89756, 9.40679] 
- [3.23779, 7.04103, 9.23368] 
- [4.99386, 9.83293, 9.62611] 
- [7.42116, 13.9492, 11.5823] 
- [11.4597, 19.7531, 18.1042] 
- [15.4761, 21.5109, 29.8871] 
- [16.4475, 13.1242, 40.9711] 
- [12.8778, 2.61892, 41.2525] 
- [7.13698, -3.09341, 35.5052]
- ⋮                           
- [11.856, 5.92339, 36.9333]  
- [7.34741, 0.974252, 32.65]  
- [3.75943, 0.129304, 27.1489]
- [2.12612, 0.641365, 22.8278]
- [1.59165, 1.53693, 18.7264] 
- [1.82744, 2.58043, 15.9256] 
- [2.79845, 4.63575, 13.5359] 
- [5.22745, 9.15156, 12.844]  
- [6.82718, 11.9177, 13.8365]
-
- - - -
-plot(sol)
-
- - - - -

Now let's change it to a phase plot. As discussed in the plot functions page, we can use the vars command to choose the variables to plot. Let's plot variable x vs variable y vs variable z:

- - -
-plot(sol,vars=(:x,:y,:z))
-
- - -
-ERROR: MethodError: no method matching pointer(::Symbol)
-Closest candidates are:
-  pointer(!Matched::String) at strings/string.jl:81
-  pointer(!Matched::String, !Matched::Integer) at strings/string.jl:82
-  pointer(!Matched::SubString{String}) at strings/substring.jl:104
-  ...
-
- - -

We can also choose to plot the timeseries for a single variable:

- - -
-plot(sol,vars=[:x])
-
- - - - -

Notice that we were able to use the variable names because we had defined the problem with the macro. But in general, we can use the indices. The previous plots would be:

- - -
-plot(sol,vars=(1,2,3))
-plot(sol,vars=[1])
-
- - - - -

Common options are to add titles, axis, and labels. For example:

- - -
-plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line",
-xaxis="Time (t)",yaxis="u(t) (in mm)",label=["X","Y","Z"])
-
- - - - -

Notice that series recipes apply to the solution type as well. For example, we can use a scatter plot on the timeseries:

- - -
-scatter(sol,vars=[:x])
-
- - - - -

This shows that the recipe is using the interpolation to smooth the plot. It becomes abundantly clear when we turn it off using denseplot=false:

- - -
-plot(sol,vars=(1,2,3),denseplot=false)
-
- - - - -

When this is done, only the values the timestep hits are plotted. Using the interpolation usually results in a much nicer looking plot so it's recommended, and since the interpolations have similar orders to the numerical methods, their results are trustworthy on the full interval. We can control the number of points used in the interpolation's plot using the plotdensity command:

- - -
-plot(sol,vars=(1,2,3),plotdensity=100)
-
- - - - -

That's plotting the entire solution using 100 points spaced evenly in time.

- - -
-plot(sol,vars=(1,2,3),plotdensity=10000)
-
- - - - -

That's more like it! By default it uses 100*length(sol), where the length is the number of internal steps it had to take. This heuristic usually does well, but for unusually difficult equations it can be relaxed (since it will take small steps), and for equations with events / discontinuities raising the plot density can help resolve the discontinuity.

-

Lastly notice that we can compose plots. Let's show where the 100 points are using a scatter plot:

- - -
-plot(sol,vars=(1,2,3))
-scatter!(sol,vars=(1,2,3),plotdensity=100)
-
- - - - -

We can instead work with an explicit plot object. This form can be better for building a complex plot in a loop.

- - -
-p = plot(sol,vars=(1,2,3))
-scatter!(p,sol,vars=(1,2,3),plotdensity=100)
-title!("I added a title")
-
- - - - -

You can do all sorts of things. Have fun!

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","formatting_plots.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/ode_introduction.html b/html/introduction/ode_introduction.html deleted file mode 100644 index b2b6898e..00000000 --- a/html/introduction/ode_introduction.html +++ /dev/null @@ -1,1716 +0,0 @@ - - - - - - An Intro to DifferentialEquations.jl - - - - - - - - - - - - - - - - - -
-
-
- -
-

An Intro to DifferentialEquations.jl

-
Chris Rackauckas
- -
- -

Basic Introduction Via Ordinary Differential Equations

-

This notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the ODE tutorial. While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned.

-

Background

-

If you are new to the study of differential equations, it can be helpful to do a quick background read on the definition of ordinary differential equations. We define an ordinary differential equation as an equation which describes the way that a variable $u$ changes, that is

-

\[ -u' = f(u,p,t) -\]

-

where $p$ are the parameters of the model, $t$ is the time variable, and $f$ is the nonlinear model of how $u$ changes. The initial value problem also includes the information about the starting value:

-

\[ -u(t_0) = u_0 -\]

-

Together, if you know the starting value and you know how the value will change with time, then you know what the value will be at any time point in the future. This is the intuitive definition of a differential equation.

-

First Model: Exponential Growth

-

Our first model will be the canonical exponential growth model. This model says that the rate of change is proportional to the current value, and is this:

-

\[ -u' = au -\]

-

where we have a starting value $u(0)=u_0$. Let's say we put 1 dollar into Bitcoin which is increasing at a rate of $98\%$ per year. Then calling now $t=0$ and measuring time in years, our model is:

-

\[ -u' = 0.98u -\]

-

and $u(0) = 1.0$. We encode this into Julia by noticing that, in this setup, we match the general form when

- - -
-f(u,p,t) = 0.98u
-
- - -
-f (generic function with 1 method)
-
- - -

with $ u_0 = 1.0 $. If we want to solve this model on a time span from t=0.0 to t=1.0, then we define an ODEProblem by specifying this function f, this initial condition u0, and this time span as follows:

- - -
-using DifferentialEquations
-f(u,p,t) = 0.98u
-u0 = 1.0
-tspan = (0.0,1.0)
-prob = ODEProblem(f,u0,tspan)
-
- - -
-ODEProblem with uType Float64 and tType Float64. In-place: false
-timespan: (0.0, 1.0)
-u0: 1.0
-
- - -

To solve our ODEProblem we use the command solve.

- - -
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.35218555997054785
- 0.6934428593452983 
- 1.0                
-u: 5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902211481592
- 1.9730369899955797
- 2.664456142481388
-
- - -

and that's it: we have successfully solved our first ODE!

-

Analyzing the Solution

-

Of course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the Solution Handling page. Here we will describe some of the basics. You can plot the solution using the plot recipe provided by Plots.jl:

- - -
-using Plots; gr()
-plot(sol)
-
- - - - -

From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the Plots.jl attributes. For example:

- - -
-plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line",
-     xaxis="Time (t)",yaxis="u(t) (in μm)",label="My Thick Line!") # legend=false
-
- - - - -

Using the mutating plot! command we can add other pieces to our plot. For this ODE we know that the true solution is $u(t) = u_0 exp(at)$, so let's add some of the true solution to our plot:

- - -
-plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!")
-
- - - - -

In the previous command I demonstrated sol.t, which grabs the array of time points that the solution was saved at:

- - -
-sol.t
-
- - -
-5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.35218555997054785
- 0.6934428593452983 
- 1.0
-
- - -

We can get the array of solution values using sol.u:

- - -
-sol.u
-
- - -
-5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902211481592
- 1.9730369899955797
- 2.664456142481388
-
- - -

sol.u[i] is the value of the solution at time sol.t[i]. We can compute arrays of functions of the solution values using standard comprehensions, like:

- - -
-[t+u for (u,t) in tuples(sol)]
-
- - -
-5-element Array{Float64,1}:
- 1.0               
- 1.2038471492789395
- 1.764375781118707 
- 2.666479849340878 
- 3.664456142481388
-
- - -

However, one interesting feature is that, by default, the solution is a continuous function. If we check the print out again:

- - -
-sol
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.35218555997054785
- 0.6934428593452983 
- 1.0                
-u: 5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902211481592
- 1.9730369899955797
- 2.664456142481388
-
- - -

you see that it says that the solution has an order-changing interpolation. The default algorithm automatically switches between methods in order to handle all types of problems. For non-stiff equations (like the one we are solving), it is a continuous function of 4th order accuracy. We can call the solution as a function of time sol(t). For example, to get the value at t=0.45, we can use the command:

- - -
-sol(0.45)
-
- - -
-1.554261048052598
-
- - -

Controlling the Solver

-

DifferentialEquations.jl has a common set of solver controls among its algorithms which can be found at the Common Solver Options page. We will detail some of the most widely used options.

-

The most useful options are the tolerances abstol and reltol. These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, reltol is the relative accuracy while abstol is the accuracy when u is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. Thus for the defaults abstol=1e-6 and reltol=1e-3, you can expect a global accuracy of about 1-2 digits. If we want to get around 6 digits of accuracy, we can use the commands:

- - -
-sol = solve(prob,abstol=1e-8,reltol=1e-8)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 9-element Array{Float64,1}:
- 0.0                
- 0.04127492324135852
- 0.14679859162071174
- 0.2863144086370134 
- 0.43819229272387084
- 0.6118901306410577 
- 0.7985632398933935 
- 0.999348375527082  
- 1.0                
-u: 9-element Array{Float64,1}:
- 1.0               
- 1.0412786454705882
- 1.1547254611993492
- 1.3239082009277183
- 1.5363791502407675
- 1.8214854108673353
- 2.1871339215496812
- 2.6627552847037683
- 2.6644562419335163
-
- - -

Now we can see no visible difference against the true solution:

- - -
-plot(sol)
-plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!")
-
- - - - -

Notice that by decreasing the tolerance, the number of steps the solver had to take was 9 instead of the previous 5. There is a trade off between accuracy and speed, and it is up to you to determine what is the right balance for your problem.

-

Another common option is to use saveat to make the solver save at specific time points. For example, if we want the solution at an even grid of t=0.1k for integers k, we would use the command:

- - -
-sol = solve(prob,saveat=0.1)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 11-element Array{Float64,1}:
- 0.0
- 0.1
- 0.2
- 0.3
- 0.4
- 0.5
- 0.6
- 0.7
- 0.8
- 0.9
- 1.0
-u: 11-element Array{Float64,1}:
- 1.0               
- 1.1029627851292922
- 1.2165269512231858
- 1.341783821228911 
- 1.4799379510608222
- 1.632316207048572 
- 1.8003833265032891
- 1.9857565541611828
- 2.1902158127993494
- 2.4157257420771905
- 2.664456142481388
-
- - -

Notice that when saveat is used the continuous output variables are no longer saved and thus sol(t), the interpolation, is only first order. We can save at an uneven grid of points by passing a collection of values to saveat. For example:

- - -
-sol = solve(prob,saveat=[0.2,0.7,0.9])
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 3-element Array{Float64,1}:
- 0.2
- 0.7
- 0.9
-u: 3-element Array{Float64,1}:
- 1.2165269512231858
- 1.9857565541611828
- 2.4157257420771905
-
- - -

If we need to reduce the amount of saving, we can also turn off the continuous output directly via dense=false:

- - -
-sol = solve(prob,dense=false)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.35218555997054785
- 0.6934428593452983 
- 1.0                
-u: 5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902211481592
- 1.9730369899955797
- 2.664456142481388
-
- - -

and to turn off all intermediate saving we can use save_everystep=false:

- - -
-sol = solve(prob,save_everystep=false)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
- 0.0
- 1.0
-u: 2-element Array{Float64,1}:
- 1.0              
- 2.664456142481388
-
- - -

If we want to solve and only save the final value, we can even set save_start=false.

- - -
-sol = solve(prob,save_everystep=false,save_start = false)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 1-element Array{Float64,1}:
- 1.0
-u: 1-element Array{Float64,1}:
- 2.664456142481388
-
- - -

Note that similarly on the other side there is save_end=false.

-

More advanced saving behaviors, such as saving functionals of the solution, are handled via the SavingCallback in the Callback Library which will be addressed later in the tutorial.

-

Choosing Solver Algorithms

-

There is no best algorithm for numerically solving a differential equation. When you call solve(prob), DifferentialEquations.jl makes a guess at a good algorithm for your problem, given the properties that you ask for (the tolerances, the saving information, etc.). However, in many cases you may want more direct control. A later notebook will help introduce the various algorithms in DifferentialEquations.jl, but for now let's introduce the syntax.

-

The most crucial determining factor in choosing a numerical method is the stiffness of the model. Stiffness is roughly characterized by a Jacobian f with large eigenvalues. That's quite mathematical, and we can think of it more intuitively: if you have big numbers in f (like parameters of order 1e5), then it's probably stiff. Or, as the creator of the MATLAB ODE Suite, Lawrence Shampine, likes to define it, if the standard algorithms are slow, then it's stiff. We will go into more depth about diagnosing stiffness in a later tutorial, but for now note that if you believe your model may be stiff, you can hint this to the algorithm chooser via alg_hints = [:stiff].

- - -
-sol = solve(prob,alg_hints=[:stiff])
-
- - -
-retcode: Success
-Interpolation: specialized 3rd order "free" stiffness-aware interpolation
-t: 8-element Array{Float64,1}:
- 0.0                
- 0.05653299582822294
- 0.17271077475592767
- 0.31646354003353006
- 0.5057553691509542 
- 0.729232165912709  
- 0.9913092529539482 
- 1.0                
-u: 8-element Array{Float64,1}:
- 1.0               
- 1.0569657840336502
- 1.1844239582097844
- 1.3636081192594387
- 1.6415485796571891
- 2.0434651240673647
- 2.641856044052394 
- 2.6644526428672295
-
- - -

Stiff algorithms have to solve implicit equations and linear systems at each step so they should only be used when required.

-

If we want to choose an algorithm directly, you can pass the algorithm type after the problem as solve(prob,alg). For example, let's solve this problem using the Tsit5() algorithm, and just for show let's change the relative tolerance to 1e-6 at the same time:

- - -
-sol = solve(prob,Tsit5(),reltol=1e-6)
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: 10-element Array{Float64,1}:
- 0.0                 
- 0.028970819746309166
- 0.10049166796488228 
- 0.19458902119285174 
- 0.30717214276081134 
- 0.4394534008655806  
- 0.588342842159556   
- 0.7524861771800566  
- 0.9293007776586718  
- 1.0                 
-u: 10-element Array{Float64,1}:
- 1.0               
- 1.0287982807225062
- 1.1034943588056554
- 1.2100930298008687
- 1.3512481217443626
- 1.538279113706816 
- 1.7799334670696616
- 2.090569368579473 
- 2.4860988691919346
- 2.6644562434913306
-
- - -

Systems of ODEs: The Lorenz Equation

-

Now let's move to a system of ODEs. The Lorenz equation is the famous "butterfly attractor" that spawned chaos theory. It is defined by the system of ODEs:

-

\[ -\begin{align} -\frac{dx}{dt} &= \sigma (y - x)\\ -\frac{dy}{dt} &= x (\rho - z) -y\\ -\frac{dz}{dt} &= xy - \beta z -\end{align} -\]

-

To define a system of differential equations in DifferentialEquations.jl, we define our f as a vector function with a vector initial condition. Thus, for the vector u = [x,y,z]', we have the derivative function:

- - -
-function lorenz!(du,u,p,t)
-    σ,ρ,β = p
-    du[1] = σ*(u[2]-u[1])
-    du[2] = u[1]*(ρ-u[3]) - u[2]
-    du[3] = u[1]*u[2] - β*u[3]
-end
-
- - -
-lorenz! (generic function with 1 method)
-
- - -

Notice here we used the in-place format which writes the output to the preallocated vector du. For systems of equations the in-place format is faster. We use the initial condition $u_0 = [1.0,0.0,0.0]$ as follows:

- - -
-u0 = [1.0,0.0,0.0]
-
- - -
-3-element Array{Float64,1}:
- 1.0
- 0.0
- 0.0
-
- - -

Lastly, for this model we made use of the parameters p. We need to set this value in the ODEProblem as well. For our model we want to solve using the parameters $\sigma = 10$, $\rho = 28$, and $\beta = 8/3$, and thus we build the parameter collection:

- - -
-p = (10,28,8/3) # we could also make this an array, or any other type!
-
- - -
-(10, 28, 2.6666666666666665)
-
- - -

Now we generate the ODEProblem type. In this case, since we have parameters, we add the parameter values to the end of the constructor call. Let's solve this on a time span of t=0 to t=100:

- - -
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz!,u0,tspan,p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100.0)
-u0: [1.0, 0.0, 0.0]
-
- - -

Now, just as before, we solve the problem:

- - -
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 1287-element Array{Float64,1}:
-   0.0                  
-   3.5678604836301404e-5
-   0.0003924646531993154
-   0.0032624016752212923
-   0.00905808176456279  
-   0.0169564955927642   
-   0.02769000245764448  
-   0.04185634375662893  
-   0.06024025665362463  
-   0.0836852441654334   
-   ⋮                    
-  99.40002509604705     
-  99.47321520444633     
-  99.54429913558833     
-  99.6304176475736      
-  99.73556893651245     
-  99.81512588011671     
-  99.88533419341042     
-  99.94751447208056     
- 100.0                  
-u: 1287-element Array{Array{Float64,1},1}:
- [1.0, 0.0, 0.0]                    
- [0.999643, 0.000998805, 1.78143e-8]
- [0.996105, 0.0109654, 2.14696e-6]  
- [0.969359, 0.0897704, 0.000143801] 
- [0.924204, 0.242289, 0.00104616]   
- [0.880045, 0.438737, 0.00342427]   
- [0.848331, 0.691564, 0.00848765]   
- [0.849504, 1.01454, 0.0182121]     
- [0.913906, 1.44256, 0.0366936]     
- [1.08886, 2.05232, 0.0740254]      
- ⋮                                  
- [1.58876, 1.17199, 19.8509]        
- [1.65121, 2.18322, 16.5046]        
- [2.33189, 3.75197, 14.0293]        
- [4.22093, 7.36092, 12.5236]        
- [9.24468, 15.6918, 16.6348]        
- [14.3434, 19.1929, 29.3336]        
- [14.753, 10.4484, 39.3374]         
- [10.2159, 1.29592, 37.3659]        
- [5.79036, -1.64216, 32.3329]
-
- - -

The same solution handling features apply to this case. Thus sol.t stores the time points and sol.u is an array storing the solution at the corresponding time points.

-

However, there are a few extra features which are good to know when dealing with systems of equations. First of all, sol also acts like an array. sol[i] returns the solution at the ith time point.

- - -
-sol.t[10],sol[10]
-
- - -
-(0.0836852441654334, [1.08886, 2.05232, 0.0740254])
-
- - -

Additionally, the solution acts like a matrix where sol[j,i] is the value of the jth variable at time i:

- - -
-sol[2,10]
-
- - -
-2.052321820923891
-
- - -

We can get a real matrix by performing a conversion:

- - -
-A = Array(sol)
-
- - -
-3×1287 Array{Float64,2}:
- 1.0  0.999643     0.996105    0.969359     …  14.753   10.2159    5.79036
- 0.0  0.000998805  0.0109654   0.0897704       10.4484   1.29592  -1.64216
- 0.0  1.78143e-8   2.14696e-6  0.000143801     39.3374  37.3659   32.3329
-
- - -

This is the same as sol, i.e. sol[i,j] = A[i,j], but now it's a true matrix. Plotting will by default show the time series for each variable:

- - -
-plot(sol)
-
- - - - -

If we instead want to plot values against each other, we can use the vars command. Let's plot variable 1 against variable 2 against variable 3:

- - -
-plot(sol,vars=(1,2,3))
-
- - - - -

This is the classic Lorenz attractor plot, where the x axis is u[1], the y axis is u[2], and the z axis is u[3]. Note that the plot recipe by default uses the interpolation, but we can turn this off:

- - -
-plot(sol,vars=(1,2,3),denseplot=false)
-
- - - - -

Yikes! This shows how calculating the continuous solution has saved a lot of computational effort by computing only a sparse solution and filling in the values! Note that in vars, 0=time, and thus we can plot the time series of a single component like:

- - -
-plot(sol,vars=(0,2))
-
- - - - -

A DSL for Parameterized Functions

-

In many cases you may be defining a lot of functions with parameters. There exists the domain-specific language (DSL) defined by the @ode_def macro for helping with this common problem. For example, we can define the Lotka-Volterra equation:

-

\[ -\begin{align} -\frac{dx}{dt} &= ax - bxy\\ -\frac{dy}{dt} &= -cy + dxy -\end{align} -\]

-

as follows:

- - -
-function lotka_volterra!(du,u,p,t)
-  du[1] = p[1]*u[1] - p[2]*u[1]*u[2]
-  du[2] = -p[3]*u[2] + p[4]*u[1]*u[2]
-end
-
- - -
-lotka_volterra! (generic function with 1 method)
-
- - -

However, that can be hard to follow since there's a lot of "programming" getting in the way. Instead, you can use the @ode_def macro from ParameterizedFunctions.jl:

- - -
-using ParameterizedFunctions
-lv! = @ode_def LotkaVolterra begin
-  dx = a*x - b*x*y
-  dy = -c*y + d*x*y
-end a b c d
-
- - -
-(::Main.WeaveSandBox8.LotkaVolterra{getfield(Main.WeaveSandBox8, Symbol("##
-7#11")),getfield(Main.WeaveSandBox8, Symbol("##8#12")),getfield(Main.WeaveS
-andBox8, Symbol("##9#13")),Nothing,Nothing,getfield(Main.WeaveSandBox8, Sym
-bol("##10#14")),Expr,Expr}) (generic function with 2 methods)
-
- - -

We can then use the result just like an ODE function from before:

- - -
-u0 = [1.0,1.0]
-p = (1.5,1.0,3.0,1.0)
-tspan = (0.0,10.0)
-prob = ODEProblem(lv!,u0,tspan,p)
-sol = solve(prob)
-plot(sol)
-
- - - - -

Not only is the DSL convenient syntax, but it does some magic behind the scenes. For example, further parts of the tutorial will describe how solvers for stiff differential equations have to make use of the Jacobian in calculations. Here, the DSL uses symbolic differentiation to automatically derive that function:

- - -
-lv!.Jex
-
- - -
-quote
-    internal_var___J[1, 1] = internal_var___p[1] - internal_var___p[2] * in
-ternal_var___u[2]
-    internal_var___J[1, 2] = -(internal_var___p[2]) * internal_var___u[1]
-    internal_var___J[2, 1] = internal_var___p[4] * internal_var___u[2]
-    internal_var___J[2, 2] = -(internal_var___p[3]) + internal_var___p[4] *
- internal_var___u[1]
-    nothing
-end
-
- - -

The DSL can derive many other functions; this ability is used to speed up the solvers. An extension to DifferentialEquations.jl, Latexify.jl, allows you to extract these pieces as LaTeX expressions.

-

Internal Types

-

The last basic user-interface feature to explore is the choice of types. DifferentialEquations.jl respects your input types to determine the internal types that are used. Thus since in the previous cases, when we used Float64 values for the initial condition, this meant that the internal values would be solved using Float64. We made sure that time was specified via Float64 values, meaning that time steps would utilize 64-bit floats as well. But, by simply changing these types we can change what is used internally.

-

As a quick example, let's say we want to solve an ODE defined by a matrix. To do this, we can simply use a matrix as input.

- - -
-A  = [1. 0  0 -5
-      4 -2  4 -3
-     -4  0  0  1
-      5 -2  2  3]
-u0 = rand(4,2)
-tspan = (0.0,1.0)
-f(u,p,t) = A*u
-prob = ODEProblem(f,u0,tspan)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 10-element Array{Float64,1}:
- 0.0                
- 0.05079806938004541
- 0.13262804118444996
- 0.22515124895279603
- 0.3434424284289903 
- 0.4733124410851997 
- 0.6188622542290512 
- 0.7707665267529566 
- 0.9366537231343952 
- 1.0                
-u: 10-element Array{Array{Float64,2},1}:
- [0.299689 0.146936; 0.464418 0.925977; 0.359755 0.85002; 0.344565 0.230628
-]     
- [0.210675 0.0866929; 0.476829 0.98434; 0.327764 0.839233; 0.457898 0.28882
-2]    
- [-0.000924563 -0.0436747; 0.413107 1.0242; 0.335355 0.857677; 0.615059 0.3
-51333]
- [-0.331614 -0.224548; 0.245112 1.0154; 0.457352 0.940292; 0.740278 0.36739
-8]    
- [-0.860708 -0.465867; -0.0399083 0.984051; 0.827682 1.14434; 0.790089 0.29
-0931] 
- [-1.49657 -0.666169; -0.294643 1.02687; 1.53653 1.46851; 0.661403 0.071955
-2]    
- [-2.10905 -0.68167; -0.261749 1.3101; 2.66606 1.8579; 0.241804 -0.3352]   
-      
- [-2.36666 -0.300285; 0.43294 1.98895; 4.0358 2.08846; -0.534534 -0.899123]
-      
- [-1.80598 0.749742; 2.25609 3.19048; 5.29639 1.77492; -1.74273 -1.56085]  
-      
- [-1.26884 1.34632; 3.27453 3.73993; 5.56311 1.40551; -2.27517 -1.78572]
-
- - -

There is no real difference from what we did before, but now in this case u0 is a 4x2 matrix. Because of that, the solution at each time point is a matrix:

- - -
-sol[3]
-
- - -
-4×2 Array{Float64,2}:
- -0.000924563  -0.0436747
-  0.413107      1.0242   
-  0.335355      0.857677 
-  0.615059      0.351333
-
- - -

In DifferentialEquations.jl, you can use any type that defines +, -, *, /, and has an appropriate norm. For example, if we want arbitrary precision floating point numbers, we can change the input to be a matrix of BigFloat:

- - -
-big_u0 = big.(u0)
-
- - -
-4×2 Array{BigFloat,2}:
- 0.299689  0.146936
- 0.464418  0.925977
- 0.359755  0.85002 
- 0.344565  0.230628
-
- - -

and we can solve the ODEProblem with arbitrary precision numbers by using that initial condition:

- - -
-prob = ODEProblem(f,big_u0,tspan)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 5-element Array{Float64,1}:
- 0.0                
- 0.19099685937400715
- 0.47758497831992464
- 0.7867482223097589 
- 1.0                
-u: 5-element Array{Array{BigFloat,2},1}:
- [0.299689 0.146936; 0.464418 0.925977; 0.359755 0.85002; 0.344565 0.230628
-] 
- [-0.19932 -0.155086; 0.316326 1.02301; 0.396559 0.901787; 0.70167 0.368772
-] 
- [-1.51705 -0.670459; -0.299778 1.03099; 1.56509 1.48022; 0.653431 0.062345
-] 
- [-2.35768 -0.230121; 0.556688 2.08512; 4.17751 2.09057; -0.635913 -0.96329
-9]
- [-1.26883 1.34633; 3.27454 3.73994; 5.56312 1.40551; -2.27518 -1.78573]
-
- - - -
-sol[1,3]
-
- - -
--1.517050746619277104565045234928230896846412531828834969540629324157913004
-774699
-
- - -

To really make use of this, we would want to change abstol and reltol to be small! Notice that the type for "time" is different than the type for the dependent variables, and this can be used to optimize the algorithm via keeping multiple precisions. We can convert time to be arbitrary precision as well by defining our time span with BigFloat variables:

- - -
-prob = ODEProblem(f,big_u0,big.(tspan))
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 5-element Array{BigFloat,1}:
- 0.0                                                                       
-       
- 0.190996859374007152536108827422345372022881594594763730844874263638536802
-3341579
- 0.477584978319924676065997214585653778208421690082332177093803624617836983
-7479661
- 0.786748222309758995145817143342544582349791199612436999756194174252621270
-5035187
- 1.0                                                                       
-       
-u: 5-element Array{Array{BigFloat,2},1}:
- [0.299689 0.146936; 0.464418 0.925977; 0.359755 0.85002; 0.344565 0.230628
-] 
- [-0.19932 -0.155086; 0.316326 1.02301; 0.396559 0.901787; 0.70167 0.368772
-] 
- [-1.51705 -0.670459; -0.299778 1.03099; 1.56509 1.48022; 0.653431 0.062345
-] 
- [-2.35768 -0.230121; 0.556688 2.08512; 4.17751 2.09057; -0.635913 -0.96329
-9]
- [-1.26883 1.34633; 3.27454 3.73994; 5.56312 1.40551; -2.27518 -1.78573]
-
- - -

Let's end by showing a more complicated use of types. For small arrays, it's usually faster to do operations on static arrays via the package StaticArrays.jl. The syntax is similar to that of normal arrays, but for these special arrays we utilize the @SMatrix macro to indicate we want to create a static array.

- - -
-using StaticArrays
-A  = @SMatrix [ 1.0  0.0 0.0 -5.0
-                4.0 -2.0 4.0 -3.0
-               -4.0  0.0 0.0  1.0
-                5.0 -2.0 2.0  3.0]
-u0 = @SMatrix rand(4,2)
-tspan = (0.0,1.0)
-f(u,p,t) = A*u
-prob = ODEProblem(f,u0,tspan)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 10-element Array{Float64,1}:
- 0.0                 
- 0.051452908625593964
- 0.13341860253535856 
- 0.22970283174453981 
- 0.35338558097221795 
- 0.4922793724686261  
- 0.6378043954293466  
- 0.7945140222625662  
- 0.9816600660987124  
- 1.0                 
-u: 10-element Array{StaticArrays.SArray{Tuple{4,2},Float64,2,8},1}:
- [0.326566 0.446768; 0.57192 0.931648; 0.943232 0.0692793; 0.329218 0.12962
-4]   
- [0.234805 0.43024; 0.690946 0.909341; 0.905929 -0.0133523; 0.49567 0.17407
-6]   
- [-0.00694068 0.379071; 0.755724 0.82508; 0.915836 -0.129932; 0.724816 0.23
-6679]
- [-0.425191 0.28254; 0.675142 0.661152; 1.07453 -0.2328; 0.91393 0.294935] 
-     
- [-1.11721 0.108818; 0.440542 0.377603; 1.57033 -0.291985; 0.982971 0.34103
-1]   
- [-1.95702 -0.134626; 0.244081 0.0182; 2.55116 -0.23818; 0.761367 0.347594]
-     
- [-2.64615 -0.412047; 0.473356 -0.323086; 3.97829 -0.0316418; 0.128545 0.29
-4769]
- [-2.75924 -0.682291; 1.64838 -0.545673; 5.65452 0.351375; -1.03039 0.15980
-7]   
- [-1.35196 -0.860136; 4.74756 -0.469785; 6.95667 0.948738; -2.94063 -0.1168
-49]  
- [-1.09537 -0.86369; 5.15525 -0.437198; 6.99077 1.00953; -3.14616 -0.1507]
-
- - - -
-sol[3]
-
- - -
-4×2 StaticArrays.SArray{Tuple{4,2},Float64,2,8}:
- -0.00694068   0.379071
-  0.755724     0.82508 
-  0.915836    -0.129932
-  0.724816     0.236679
-
- - -

Conclusion

-

These are the basic controls in DifferentialEquations.jl. All equations are defined via a problem type, and the solve command is used with an algorithm choice (or the default) to get a solution. Every solution acts the same, like an array sol[i] with sol.t[i], and also like a continuous function sol(t) with a nice plot command plot(sol). The Common Solver Options can be used to control the solver for any equation type. Lastly, the types used in the numerical solving are determined by the input types, and this can be used to solve with arbitrary precision and add additional optimizations (this can be used to solve via GPUs for example!). While this was shown on ODEs, these techniques generalize to other types of equations as well.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","ode_introduction.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/optimizing_diffeq_code.html b/html/introduction/optimizing_diffeq_code.html deleted file mode 100644 index 23dbb856..00000000 --- a/html/introduction/optimizing_diffeq_code.html +++ /dev/null @@ -1,1771 +0,0 @@ - - - - - - Optimizing DiffEq Code - - - - - - - - - - - - - - - - - -
-
-
- -
-

Optimizing DiffEq Code

-
Chris Rackauckas
- -
- -

In this notebook we will walk through some of the main tools for optimizing your code in order to efficiently solve DifferentialEquations.jl. User-side optimizations are important because, for sufficiently difficult problems, most of the time will be spent inside of your f function, the function you are trying to solve. "Efficient" integrators are those that reduce the required number of f calls to hit the error tolerance. The main ideas for optimizing your DiffEq code, or any Julia function, are the following:

-
    -
  • Make it non-allocating

    -
  • -
  • Use StaticArrays for small arrays

    -
  • -
  • Use broadcast fusion

    -
  • -
  • Make it type-stable

    -
  • -
  • Reduce redundant calculations

    -
  • -
  • Make use of BLAS calls

    -
  • -
  • Optimize algorithm choice

    -
  • -
-

We'll discuss these strategies in the context of small and large systems. Let's start with small systems.

-

Optimizing Small Systems (<100 DEs)

-

Let's take the classic Lorenz system from before. Let's start by naively writing the system in its out-of-place form:

- - -
-function lorenz(u,p,t)
- dx = 10.0*(u[2]-u[1])
- dy = u[1]*(28.0-u[3]) - u[2]
- dz = u[1]*u[2] - (8/3)*u[3]
- [dx,dy,dz]
-end
-
- - -
-lorenz (generic function with 1 method)
-
- - -

Here, lorenz returns an object, [dx,dy,dz], which is created within the body of lorenz.

-

This is a common code pattern from high-level languages like MATLAB, SciPy, or R's deSolve. However, the issue with this form is that it allocates a vector, [dx,dy,dz], at each step. Let's benchmark the solution process with this choice of function:

- - -
-using DifferentialEquations, BenchmarkTools
-u0 = [1.0;0.0;0.0]
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz,u0,tspan)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  10.94 MiB
-  allocs estimate:  102469
-  --------------
-  minimum time:     3.363 ms (0.00% GC)
-  median time:      5.887 ms (0.00% GC)
-  mean time:        6.990 ms (39.32% GC)
-  maximum time:     20.291 ms (45.00% GC)
-  --------------
-  samples:          714
-  evals/sample:     1
-
- - -

The BenchmarkTools package's @benchmark runs the code multiple times to get an accurate measurement. The minimum time is the time it takes when your OS and other background processes aren't getting in the way. Notice that in this case it takes about 5ms to solve and allocates around 11 MiB. However, if we were to use this inside of a real user code we'd see a lot of time spent doing garbage collection (GC) to clean up all of the arrays we made. Even if we turn off saving we have these allocations.

- - -
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  9.57 MiB
-  allocs estimate:  89577
-  --------------
-  minimum time:     3.094 ms (0.00% GC)
-  median time:      4.008 ms (0.00% GC)
-  mean time:        6.194 ms (38.81% GC)
-  maximum time:     18.892 ms (64.55% GC)
-  --------------
-  samples:          805
-  evals/sample:     1
-
- - -

The problem of course is that arrays are created every time our derivative function is called. This function is called multiple times per step and is thus the main source of memory usage. To fix this, we can use the in-place form to ***make our code non-allocating***:

- - -
-function lorenz!(du,u,p,t)
- du[1] = 10.0*(u[2]-u[1])
- du[2] = u[1]*(28.0-u[3]) - u[2]
- du[3] = u[1]*u[2] - (8/3)*u[3]
-end
-
- - -
-lorenz! (generic function with 1 method)
-
- - -

Here, instead of creating an array each time, we utilized the cache array du. When the inplace form is used, DifferentialEquations.jl takes a different internal route that minimizes the internal allocations as well. When we benchmark this function, we will see quite a difference.

- - -
-u0 = [1.0;0.0;0.0]
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz!,u0,tspan)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  1.37 MiB
-  allocs estimate:  12964
-  --------------
-  minimum time:     763.400 μs (0.00% GC)
-  median time:      806.001 μs (0.00% GC)
-  mean time:        1.201 ms (26.34% GC)
-  maximum time:     10.577 ms (86.19% GC)
-  --------------
-  samples:          4151
-  evals/sample:     1
-
- - - -
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  6.83 KiB
-  allocs estimate:  92
-  --------------
-  minimum time:     407.901 μs (0.00% GC)
-  median time:      413.900 μs (0.00% GC)
-  mean time:        442.637 μs (0.45% GC)
-  maximum time:     7.306 ms (93.70% GC)
-  --------------
-  samples:          10000
-  evals/sample:     1
-
- - -

There is a 4x time difference just from that change! Notice there are still some allocations and this is due to the construction of the integration cache. But this doesn't scale with the problem size:

- - -
-tspan = (0.0,500.0) # 5x longer than before
-prob = ODEProblem(lorenz!,u0,tspan)
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  6.83 KiB
-  allocs estimate:  92
-  --------------
-  minimum time:     2.082 ms (0.00% GC)
-  median time:      2.257 ms (0.00% GC)
-  mean time:        2.336 ms (0.00% GC)
-  maximum time:     3.490 ms (0.00% GC)
-  --------------
-  samples:          2138
-  evals/sample:     1
-
- - -

since that's all just setup allocations.

-

But if the system is small we can optimize even more.

-

Allocations are only expensive if they are "heap allocations". For a more in-depth definition of heap allocations, there are a lot of sources online. But a good working definition is that heap allocations are variable-sized slabs of memory which have to be pointed to, and this pointer indirection costs time. Additionally, the heap has to be managed and the garbage collector has to actively keep track of what's on the heap.

-

However, there's an alternative to heap allocations, known as stack allocations. The stack is statically-sized (known at compile time) and thus its accesses are quick. Additionally, the exact block of memory is known in advance by the compiler, and thus re-using the memory is cheap. This means that allocating on the stack has essentially no cost!

-

Arrays have to be heap allocated because their size (and thus the amount of memory they take up) is determined at runtime. But there are structures in Julia which are stack-allocated. structs for example are stack-allocated "value-type"s. Tuples are a stack-allocated collection. The most useful data structure for DiffEq though is the StaticArray from the package StaticArrays.jl. These arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example:

- - -
-using StaticArrays
-A = @SVector [2.0,3.0,5.0]
-
- - -
-3-element StaticArrays.SArray{Tuple{3},Float64,1,3}:
- 2.0
- 3.0
- 5.0
-
- - -

Notice that the 3 after SVector gives the size of the SVector. It cannot be changed. Additionally, SVectors are immutable, so we have to create a new SVector to change values. But remember, we don't have to worry about allocations because this data structure is stack-allocated. SArrays have a lot of extra optimizations as well: they have fast matrix multiplication, fast QR factorizations, etc. which directly make use of the information about the size of the array. Thus, when possible they should be used.

-

Unfortunately static arrays can only be used for sufficiently small arrays. After a certain size, they are forced to heap allocate after some instructions and their compile time balloons. Thus static arrays shouldn't be used if your system has more than 100 variables. Additionally, only the native Julia algorithms can fully utilize static arrays.

-

Let's ***optimize lorenz using static arrays***. Note that in this case, we want to use the out-of-place allocating form, but this time we want to output a static array:

- - -
-function lorenz_static(u,p,t)
- dx = 10.0*(u[2]-u[1])
- dy = u[1]*(28.0-u[3]) - u[2]
- dz = u[1]*u[2] - (8/3)*u[3]
- @SVector [dx,dy,dz]
-end
-
- - -
-lorenz_static (generic function with 1 method)
-
- - -

To make the solver internally use static arrays, we simply give it a static array as the initial condition:

- - -
-u0 = @SVector [1.0,0.0,0.0]
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz_static,u0,tspan)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  471.98 KiB
-  allocs estimate:  2662
-  --------------
-  minimum time:     446.200 μs (0.00% GC)
-  median time:      454.500 μs (0.00% GC)
-  mean time:        530.258 μs (11.39% GC)
-  maximum time:     7.596 ms (93.28% GC)
-  --------------
-  samples:          9391
-  evals/sample:     1
-
- - - -
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  6.13 KiB
-  allocs estimate:  73
-  --------------
-  minimum time:     354.800 μs (0.00% GC)
-  median time:      358.501 μs (0.00% GC)
-  mean time:        366.696 μs (0.38% GC)
-  maximum time:     7.954 ms (94.13% GC)
-  --------------
-  samples:          10000
-  evals/sample:     1
-
- - -

And that's pretty much all there is to it. With static arrays you don't have to worry about allocating, so use operations like * and don't worry about fusing operations (discussed in the next section). Do "the vectorized code" of R/MATLAB/Python and your code in this case will be fast, or directly use the numbers/values.

-

Exercise 1

-

Implement the out-of-place array, in-place array, and out-of-place static array forms for the Henon-Heiles System and time the results.

-

Optimizing Large Systems

-

Interlude: Managing Allocations with Broadcast Fusion

-

When your system is sufficiently large, or you have to make use of a non-native Julia algorithm, you have to make use of Arrays. In order to use arrays in the most efficient manner, you need to be careful about temporary allocations. Vectorized calculations naturally have plenty of temporary array allocations. This is because a vectorized calculation outputs a vector. Thus:

- - -
-A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000)
-test(A,B,C) = A + B + C
-@benchmark test(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  3
-  --------------
-  minimum time:     3.255 ms (0.00% GC)
-  median time:      3.706 ms (0.00% GC)
-  mean time:        5.055 ms (28.52% GC)
-  maximum time:     11.554 ms (56.93% GC)
-  --------------
-  samples:          988
-  evals/sample:     1
-
- - -

That expression A + B + C creates 2 arrays. It first creates one for the output of A + B, then adds C to that result array to get the final result. 2 arrays! We don't want that! The first thing to do to fix this is to use broadcast fusion. Broadcast fusion puts expressions together. For example, instead of doing the + operations separately, if we were to add them all at the same time, then we would only have a single array that's created. For example:

- - -
-test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C)
-@benchmark test2(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  5
-  --------------
-  minimum time:     3.887 ms (0.00% GC)
-  median time:      4.256 ms (0.00% GC)
-  mean time:        5.608 ms (25.30% GC)
-  maximum time:     10.655 ms (59.19% GC)
-  --------------
-  samples:          891
-  evals/sample:     1
-
- - -

Puts the whole expression into a single function call, and thus only one array is required to store output. This is the same as writing the loop:

- - -
-function test3(A,B,C)
-    D = similar(A)
-    @inbounds for i in eachindex(A)
-        D[i] = A[i] + B[i] + C[i]
-    end
-    D
-end
-@benchmark test3(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  2
-  --------------
-  minimum time:     3.199 ms (0.00% GC)
-  median time:      3.657 ms (0.00% GC)
-  mean time:        5.109 ms (28.64% GC)
-  maximum time:     17.494 ms (56.09% GC)
-  --------------
-  samples:          979
-  evals/sample:     1
-
- - -

However, Julia's broadcast is syntactic sugar for this. If multiple expressions have a ., then it will put those vectorized operations together. Thus:

- - -
-test4(A,B,C) = A .+ B .+ C
-@benchmark test4(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  2
-  --------------
-  minimum time:     3.292 ms (0.00% GC)
-  median time:      3.821 ms (0.00% GC)
-  mean time:        5.147 ms (28.61% GC)
-  maximum time:     19.458 ms (76.18% GC)
-  --------------
-  samples:          972
-  evals/sample:     1
-
- - -

is a version with only 1 array created (the output). Note that .s can be used with function calls as well:

- - -
-sin.(A) .+ sin.(B)
-
- - -
-1000×1000 Array{Float64,2}:
- 1.36094   0.876109  0.533788  1.12212   …  0.0699826  1.32979   1.45563 
- 1.29362   1.23442   1.14458   0.5981       1.13544    1.21092   0.552619
- 0.976899  0.335794  0.435801  1.35982      1.58853    1.44019   0.951693
- 1.39791   1.19766   0.95237   0.134948     1.53473    0.739707  0.376413
- 0.799265  0.573556  1.36474   1.30171      0.222765   1.07937   1.587   
- 0.95878   1.48331   1.14766   1.12242   …  0.779548   0.143069  0.737896
- 1.06835   0.460558  0.724454  0.857264     0.975307   0.868925  0.750208
- 0.764275  1.10795   0.855457  0.563373     0.90458    0.7817    1.36902 
- 0.654959  1.22932   0.921794  1.27953      1.21609    0.553689  0.605933
- 1.55159   1.18695   1.57847   0.940593     0.884401   0.805792  1.01356 
- ⋮                                       ⋱                               
- 0.673189  0.663399  1.40331   1.0137       1.36143    1.0532    1.18377 
- 0.692109  1.25781   0.667323  1.41418      1.23672    1.34051   1.46219 
- 1.10767   1.25508   1.21924   0.799353     0.99678    0.577142  0.829469
- 0.514223  0.950555  0.446669  1.02357      0.874545   1.25339   0.92912 
- 0.65586   0.874411  1.41695   1.60379   …  0.607246   0.692978  1.00922 
- 0.785478  0.910873  1.3038    1.01724      0.778267   0.408835  1.45358 
- 1.57424   0.694359  1.30818   0.999019     0.755165   0.484897  1.08723 
- 1.12997   1.14312   0.822742  0.97901      1.40981    0.855103  1.13344 
- 1.15197   0.38698   0.717209  0.927147     0.790823   0.943003  0.291941
-
- - -

Also, the @. macro applies a dot to every operator:

- - -
-test5(A,B,C) = @. A + B + C #only one array allocated
-@benchmark test5(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  3
-  --------------
-  minimum time:     3.395 ms (0.00% GC)
-  median time:      4.222 ms (0.00% GC)
-  mean time:        5.868 ms (30.45% GC)
-  maximum time:     24.766 ms (79.38% GC)
-  --------------
-  samples:          850
-  evals/sample:     1
-
- - -

Using these tools we can get rid of our intermediate array allocations for many vectorized function calls. But we are still allocating the output array. To get rid of that allocation, we can instead use mutation. Mutating broadcast is done via .=. For example, if we pre-allocate the output:

- - -
-D = zeros(1000,1000);
-
- - - -

Then we can keep re-using this cache for subsequent calculations. The mutating broadcasting form is:

- - -
-test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated
-@benchmark test6!(D,A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  0 bytes
-  allocs estimate:  0
-  --------------
-  minimum time:     1.805 ms (0.00% GC)
-  median time:      2.274 ms (0.00% GC)
-  mean time:        2.518 ms (0.00% GC)
-  maximum time:     7.981 ms (0.00% GC)
-  --------------
-  samples:          1975
-  evals/sample:     1
-
- - -

If we use @. before the =, then it will turn it into .=:

- - -
-test7!(D,A,B,C) = @. D = A + B + C #only one array allocated
-@benchmark test7!(D,A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  0 bytes
-  allocs estimate:  0
-  --------------
-  minimum time:     1.768 ms (0.00% GC)
-  median time:      2.191 ms (0.00% GC)
-  mean time:        2.426 ms (0.00% GC)
-  maximum time:     9.081 ms (0.00% GC)
-  --------------
-  samples:          2049
-  evals/sample:     1
-
- - -

Notice that in this case, there is no "output", and instead the values inside of D are what are changed (like with the DiffEq inplace function). Many Julia functions have a mutating form which is denoted with a !. For example, the mutating form of map is map!:

- - -
-test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C)
-@benchmark test8!(D,A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  32 bytes
-  allocs estimate:  1
-  --------------
-  minimum time:     1.978 ms (0.00% GC)
-  median time:      2.314 ms (0.00% GC)
-  mean time:        2.520 ms (0.00% GC)
-  maximum time:     6.838 ms (0.00% GC)
-  --------------
-  samples:          1976
-  evals/sample:     1
-
- - -

Some operations require using an alternate mutating form in order to be fast. For example, matrix multiplication via * allocates a temporary:

- - -
-@benchmark A*B
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  2
-  --------------
-  minimum time:     13.462 ms (0.00% GC)
-  median time:      17.708 ms (0.00% GC)
-  mean time:        18.361 ms (9.41% GC)
-  maximum time:     28.630 ms (23.92% GC)
-  --------------
-  samples:          272
-  evals/sample:     1
-
- - -

Instead, we can use the mutating form mul! into a cache array to avoid allocating the output:

- - -
-using LinearAlgebra
-@benchmark mul!(D,A,B) # same as D = A * B
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  0 bytes
-  allocs estimate:  0
-  --------------
-  minimum time:     12.439 ms (0.00% GC)
-  median time:      15.580 ms (0.00% GC)
-  mean time:        16.139 ms (0.00% GC)
-  maximum time:     23.747 ms (0.00% GC)
-  --------------
-  samples:          310
-  evals/sample:     1
-
- - -

For repeated calculations this reduced allocation can stop GC cycles and thus lead to more efficient code. Additionally, ***we can fuse together higher level linear algebra operations using BLAS***. The package SugarBLAS.jl makes it easy to write higher level operations like alpha*B*A + beta*C as mutating BLAS calls.

-

Example Optimization: Gierer-Meinhardt Reaction-Diffusion PDE Discretization

-

Let's optimize the solution of a Reaction-Diffusion PDE's discretization. In its discretized form, this is the ODE:

-

\[ -\begin{align} -du &= D_1 (A_y u + u A_x) + \frac{au^2}{v} + \bar{u} - \alpha u\\ -dv &= D_2 (A_y v + v A_x) + a u^2 - \beta v -\end{align} -\]

-

where $u$, $v$, and $A$ are matrices. Here, we will use the simplified version where $A$ is the tridiagonal stencil $[1,-2,1]$, i.e. it's the 2D discretization of the Laplacian. The native code would be something along the lines of:

- - -
-# Generate the constants
-p = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2
-N = 100
-Ax = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1]))
-Ay = copy(Ax)
-Ax[2,1] = 2.0
-Ax[end-1,end] = 2.0
-Ay[1,2] = 2.0
-Ay[end,end-1] = 2.0
-
-function basic_version!(dr,r,p,t)
-  a,α,ubar,β,D1,D2 = p
-  u = r[:,:,1]
-  v = r[:,:,2]
-  Du = D1*(Ay*u + u*Ax)
-  Dv = D2*(Ay*v + v*Ax)
-  dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u
-  dr[:,:,2] = Dv .+ a.*u.*u .- β*v
-end
-
-a,α,ubar,β,D1,D2 = p
-uss = (ubar+β)/α
-vss = (a/β)*uss^2
-r0 = zeros(100,100,2)
-r0[:,:,1] .= uss.+0.1.*rand.()
-r0[:,:,2] .= vss
-
-prob = ODEProblem(basic_version!,r0,(0.0,0.1),p)
-
- - -
-ODEProblem with uType Array{Float64,3} and tType Float64. In-place: true
-timespan: (0.0, 0.1)
-u0: [11.0216 11.0132 … 11.093 11.0379; 11.0447 11.0317 … 11.0928 11.016; … 
-; 11.0043 11.0078 … 11.0809 11.0495; 11.0236 11.0714 … 11.0036 11.0444]
-
-[12.1 12.1 … 12.1 12.1; 12.1 12.1 … 12.1 12.1; … ; 12.1 12.1 … 12.1 12.1; 1
-2.1 12.1 … 12.1 12.1]
-
- - -

In this version we have encoded our initial condition to be a 3-dimensional array, with u[:,:,1] being the A part and u[:,:,2] being the B part.

- - -
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  186.88 MiB
-  allocs estimate:  8589
-  --------------
-  minimum time:     97.539 ms (21.87% GC)
-  median time:      233.933 ms (64.80% GC)
-  mean time:        203.062 ms (59.27% GC)
-  maximum time:     295.498 ms (68.81% GC)
-  --------------
-  samples:          25
-  evals/sample:     1
-
- - -

While this version isn't very efficient, it is a reasonable first implementation.

-

We recommend writing the "high-level" code first, and iteratively optimizing it!

-

The first thing that we can do is get rid of the slicing allocations. The operation r[:,:,1] creates a temporary array instead of a "view", i.e. a pointer to the already existing memory. To make it a view, add @view. Note that we have to be careful with views because they point to the same memory, and thus changing a view changes the original values:

- - -
-A = rand(4)
-@show A
-
- - -
-A = [0.98078, 0.13344, 0.607742, 0.768513]
-
- - - -
-B = @view A[1:3]
-B[2] = 2
-@show A
-
- - -
-A = [0.98078, 2.0, 0.607742, 0.768513]
-4-element Array{Float64,1}:
- 0.980780213650249 
- 2.0               
- 0.6077421301853634
- 0.7685125742720975
-
- - -

Notice that changing B changed A. This is something to be careful of, but at the same time we want to use this since we want to modify the output dr. Additionally, the last statement is a purely element-wise operation, and thus we can make use of broadcast fusion there. Let's rewrite basic_version! to ***avoid slicing allocations*** and to ***use broadcast fusion***:

- - -
-function gm2!(dr,r,p,t)
-  a,α,ubar,β,D1,D2 = p
-  u = @view r[:,:,1]
-  v = @view r[:,:,2]
-  du = @view dr[:,:,1]
-  dv = @view dr[:,:,2]
-  Du = D1*(Ay*u + u*Ax)
-  Dv = D2*(Ay*v + v*Ax)
-  @. du = Du + a.*u.*u./v + ubar - α*u
-  @. dv = Dv + a.*u.*u - β*v
-end
-prob = ODEProblem(gm2!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  119.55 MiB
-  allocs estimate:  7119
-  --------------
-  minimum time:     77.705 ms (16.54% GC)
-  median time:      132.179 ms (45.86% GC)
-  mean time:        166.564 ms (57.45% GC)
-  maximum time:     275.339 ms (66.50% GC)
-  --------------
-  samples:          31
-  evals/sample:     1
-
- - -

Now, most of the allocations are taking place in Du = D1*(Ay*u + u*Ax) since those operations are vectorized and not mutating. We should instead replace the matrix multiplications with mul!. When doing so, we will need to have cache variables to write into. This looks like:

- - -
-Ayu = zeros(N,N)
-uAx = zeros(N,N)
-Du = zeros(N,N)
-Ayv = zeros(N,N)
-vAx = zeros(N,N)
-Dv = zeros(N,N)
-function gm3!(dr,r,p,t)
-  a,α,ubar,β,D1,D2 = p
-  u = @view r[:,:,1]
-  v = @view r[:,:,2]
-  du = @view dr[:,:,1]
-  dv = @view dr[:,:,2]
-  mul!(Ayu,Ay,u)
-  mul!(uAx,u,Ax)
-  mul!(Ayv,Ay,v)
-  mul!(vAx,v,Ax)
-  @. Du = D1*(Ayu + uAx)
-  @. Dv = D2*(Ayv + vAx)
-  @. du = Du + a*u*u./v + ubar - α*u
-  @. dv = Dv + a*u*u - β*v
-end
-prob = ODEProblem(gm3!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  29.76 MiB
-  allocs estimate:  5355
-  --------------
-  minimum time:     52.411 ms (5.36% GC)
-  median time:      54.456 ms (6.91% GC)
-  mean time:        54.943 ms (7.05% GC)
-  maximum time:     63.929 ms (11.50% GC)
-  --------------
-  samples:          91
-  evals/sample:     1
-
- - -

But our temporary variables are global variables. We need to either declare the caches as const or localize them. We can localize them by adding them to the parameters, p. It's easier for the compiler to reason about local variables than global variables. ***Localizing variables helps to ensure type stability***.

- - -
-p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2
-function gm4!(dr,r,p,t)
-  a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p
-  u = @view r[:,:,1]
-  v = @view r[:,:,2]
-  du = @view dr[:,:,1]
-  dv = @view dr[:,:,2]
-  mul!(Ayu,Ay,u)
-  mul!(uAx,u,Ax)
-  mul!(Ayv,Ay,v)
-  mul!(vAx,v,Ax)
-  @. Du = D1*(Ayu + uAx)
-  @. Dv = D2*(Ayv + vAx)
-  @. du = Du + a*u*u./v + ubar - α*u
-  @. dv = Dv + a*u*u - β*v
-end
-prob = ODEProblem(gm4!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  29.66 MiB
-  allocs estimate:  1090
-  --------------
-  minimum time:     43.787 ms (5.98% GC)
-  median time:      46.164 ms (6.59% GC)
-  mean time:        46.746 ms (7.42% GC)
-  maximum time:     58.155 ms (13.84% GC)
-  --------------
-  samples:          107
-  evals/sample:     1
-
- - -

We could then use the BLAS gemmv to optimize the matrix multiplications some more, but instead let's devectorize the stencil.

- - -
-p = (1.0,1.0,1.0,10.0,0.001,100.0,N)
-function fast_gm!(du,u,p,t)
-  a,α,ubar,β,D1,D2,N = p
-
-  @inbounds for j in 2:N-1, i in 2:N-1
-    du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +
-              a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-
-  @inbounds for j in 2:N-1, i in 2:N-1
-    du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +
-            a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-
-  @inbounds for j in 2:N-1
-    i = 1
-    du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +
-            a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for j in 2:N-1
-    i = 1
-    du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +
-            a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-  @inbounds for j in 2:N-1
-    i = N
-    du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +
-           a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for j in 2:N-1
-    i = N
-    du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +
-           a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-
-  @inbounds for i in 2:N-1
-    j = 1
-    du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +
-              a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for i in 2:N-1
-    j = 1
-    du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +
-              a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-  @inbounds for i in 2:N-1
-    j = N
-    du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for i in 2:N-1
-    j = N
-    du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-
-  @inbounds begin
-    i = 1; j = 1
-    du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +
-              a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +
-              a*u[i,j,1]^2 - β*u[i,j,2]
-
-    i = 1; j = N
-    du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-
-    i = N; j = 1
-    du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-
-    i = N; j = N
-    du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-   end
-end
-prob = ODEProblem(fast_gm!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  29.63 MiB
-  allocs estimate:  505
-  --------------
-  minimum time:     10.348 ms (18.86% GC)
-  median time:      11.362 ms (20.33% GC)
-  mean time:        11.881 ms (24.37% GC)
-  maximum time:     22.928 ms (36.90% GC)
-  --------------
-  samples:          421
-  evals/sample:     1
-
- - -

Lastly, we can do other things like multithread the main loops, but these optimizations get the last 2x-3x out. The main optimizations which apply everywhere are the ones we just performed (though the last one only works if your matrix is a stencil. This is known as a matrix-free implementation of the PDE discretization).

-

This gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code!

-

The last thing to do is then ***optimize our algorithm choice***. We have been using Tsit5() as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use CVODE_BDF(). However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. CVODE_BDF allows us to use a sparse Newton-Krylov solver by setting linear_solver = :GMRES (see the solver documentation, and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time.

- - -
-prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  2.76 GiB
-  allocs estimate:  41671
-  --------------
-  minimum time:     2.903 s (29.68% GC)
-  median time:      17.943 s (59.89% GC)
-  mean time:        17.943 s (59.89% GC)
-  maximum time:     32.983 s (62.54% GC)
-  --------------
-  samples:          2
-  evals/sample:     1
-
- - - -
-using Sundials
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  54.30 MiB
-  allocs estimate:  14630
-  --------------
-  minimum time:     250.041 ms (2.08% GC)
-  median time:      257.360 ms (3.40% GC)
-  mean time:        290.219 ms (14.69% GC)
-  maximum time:     378.596 ms (34.21% GC)
-  --------------
-  samples:          18
-  evals/sample:     1
-
- - - -
-prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p)
-# Will go out of memory if we don't turn off `save_everystep`!
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  2.91 MiB
-  allocs estimate:  113
-  --------------
-  minimum time:     4.722 s (0.00% GC)
-  median time:      4.783 s (0.00% GC)
-  mean time:        4.783 s (0.00% GC)
-  maximum time:     4.844 s (0.00% GC)
-  --------------
-  samples:          2
-  evals/sample:     1
-
- - - -
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  351.14 MiB
-  allocs estimate:  98458
-  --------------
-  minimum time:     1.790 s (0.00% GC)
-  median time:      1.920 s (9.96% GC)
-  mean time:        1.940 s (10.25% GC)
-  maximum time:     2.111 s (19.21% GC)
-  --------------
-  samples:          3
-  evals/sample:     1
-
- - -

Now let's check the allocation growth.

- - -
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  4.47 MiB
-  allocs estimate:  87840
-  --------------
-  minimum time:     1.662 s (0.00% GC)
-  median time:      1.675 s (0.00% GC)
-  mean time:        1.684 s (0.00% GC)
-  maximum time:     1.716 s (0.00% GC)
-  --------------
-  samples:          3
-  evals/sample:     1
-
- - - -
-prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p)
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  5.72 MiB
-  allocs estimate:  118527
-  --------------
-  minimum time:     2.224 s (0.00% GC)
-  median time:      2.235 s (0.00% GC)
-  mean time:        2.247 s (0.00% GC)
-  maximum time:     2.282 s (0.00% GC)
-  --------------
-  samples:          3
-  evals/sample:     1
-
- - -

Notice that we've elimated almost all allocations, allowing the code to grow without hitting garbage collection and slowing down.

-

Why is CVODE_BDF doing well? What's happening is that, because the problem is stiff, the number of steps required by the explicit Runge-Kutta method grows rapidly, whereas CVODE_BDF is taking large steps. Additionally, the GMRES linear solver form is quite an efficient way to solve the implicit system in this case. This is problem-dependent, and in many cases using a Krylov method effectively requires a preconditioner, so you need to play around with testing other algorithms and linear solvers to find out what works best with your problem.

-

Conclusion

-

Julia gives you the tools to optimize the solver "all the way", but you need to make use of it. The main thing to avoid is temporary allocations. For small systems, this is effectively done via static arrays. For large systems, this is done via in-place operations and cache arrays. Either way, the resulting solution can be immensely sped up over vectorized formulations by using these principles.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","optimizing_diffeq_code.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/classical_physics.html b/html/models/classical_physics.html deleted file mode 100644 index 0b2f0765..00000000 --- a/html/models/classical_physics.html +++ /dev/null @@ -1,1156 +0,0 @@ - - - - - - Classical Physics Models - - - - - - - - - - - - - - - - - -
-
-
- -
-

Classical Physics Models

-
Yingbo Ma, Chris Rackauckas
- -
- -

If you're getting some cold feet to jump in to DiffEq land, here are some handcrafted differential equations mini problems to hold your hand along the beginning of your journey.

-

Radioactive Decay of Carbon-14

-

First order linear ODE

-

\[ -f(t,u) = \frac{du}{dt} -\]

-

The Radioactive decay problem is the first order linear ODE problem of an exponential with a negative coefficient, which represents the half-life of the process in question. Should the coefficient be positive, this would represent a population growth equation.

- - -
-using OrdinaryDiffEq, Plots
-gr()
-
-#Half-life of Carbon-14 is 5,730 years.
-C₁ = 5.730
-
-#Setup
-u₀ = 1.0
-tspan = (0.0, 1.0)
-
-#Define the problem
-radioactivedecay(u,p,t) = -C₁*u
-
-#Pass to solver
-prob = ODEProblem(radioactivedecay,u₀,tspan)
-sol = solve(prob,Tsit5())
-
-#Plot
-plot(sol,linewidth=2,title ="Carbon-14 half-life", xaxis = "Time in thousands of years", yaxis = "Percentage left", label = "Numerical Solution")
-plot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label="Analytical Solution")
-
- - - - -

Simple Pendulum

-

Second Order Linear ODE

-

We will start by solving the pendulum problem. In the physics class, we often solve this problem by small angle approximation, i.e. $ sin(\theta) \approx \theta$, because otherwise, we get an elliptic integral which doesn't have an analytic solution. The linearized form is

-

\[ -\ddot{\theta} + \frac{g}{L}{\theta} = 0 -\]

-

But we have numerical ODE solvers! Why not solve the real pendulum?

-

\[ -\ddot{\theta} + \frac{g}{L}{\sin(\theta)} = 0 -\]

- - -
-# Simple Pendulum Problem
-using OrdinaryDiffEq, Plots
-
-#Constants
-const g = 9.81
-L = 1.0
-
-#Initial Conditions
-u₀ = [0,π/2]
-tspan = (0.0,6.3)
-
-#Define the problem
-function simplependulum(du,u,p,t)
-    θ  = u[1]
-     = u[2]
-    du[1] = 
-    du[2] = -(g/L)*sin(θ)
-end
-
-#Pass to solvers
-prob = ODEProblem(simplependulum,u₀, tspan)
-sol = solve(prob,Tsit5())
-
-#Plot
-plot(sol,linewidth=2,title ="Simple Pendulum Problem", xaxis = "Time", yaxis = "Height", label = ["Theta","dTheta"])
-
- - - - -

So now we know that behaviour of the position versus time. However, it will be useful to us to look at the phase space of the pendulum, i.e., and representation of all possible states of the system in question (the pendulum) by looking at its velocity and position. Phase space analysis is ubiquitous in the analysis of dynamical systems, and thus we will provide a few facilities for it.

- - -
-p = plot(sol,vars = (1,2), xlims = (-9,9), title = "Phase Space Plot", xaxis = "Velocity", yaxis = "Position", leg=false)
-function phase_plot(prob, u0, p, tspan=2pi)
-    _prob = ODEProblem(prob.f,u0,(0.0,tspan))
-    sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy
-    plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing)
-end
-for i in -4pi:pi/2:4π
-    for j in -4pi:pi/2:4π
-        phase_plot(prob, [j,i], p)
-    end
-end
-plot(p,xlims = (-9,9))
-
- - - - -

Simple Harmonic Oscillator

-

Double Pendulum

- - -
-#Double Pendulum Problem
-using OrdinaryDiffEq, Plots
-
-#Constants and setup
-const m₁, m₂, L₁, L₂ = 1, 2, 1, 2
-initial = [0, π/3, 0, 3pi/5]
-tspan = (0.,50.)
-
-#Convenience function for transforming from polar to Cartesian coordinates
-function polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4))
-    u = sol.t[1]:dt:sol.t[end]
-
-    p1 = l1*map(x->x[vars[1]], sol.(u))
-    p2 = l2*map(y->y[vars[2]], sol.(u))
-
-    x1 = l1*sin.(p1)
-    y1 = l1*-cos.(p1)
-    (u, (x1 + l2*sin.(p2),
-     y1 - l2*cos.(p2)))
-end
-
-#Define the Problem
-function double_pendulum(xdot,x,p,t)
-    xdot[1]=x[2]
-    xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2)))
-    xdot[3]=x[4]
-    xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2))
-end
-
-#Pass to Solvers
-double_pendulum_problem = ODEProblem(double_pendulum, initial, tspan)
-sol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05);
-
- - - - -
-#Obtain coordinates in Cartesian Geometry
-ts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01)
-plot(ps...)
-
- - - - -

Poincaré section

-

The Poincaré section is a contour plot of a higher-dimensional phase space diagram. It helps to understand the dynamic interactions and is wonderfully pretty.

-

The following equation came from StackOverflow question

-

\[ -\frac{d}{dt} - \begin{pmatrix} - \alpha \\ l_\alpha \\ \beta \\ l_\beta - \end{pmatrix}= - \begin{pmatrix} - 2\frac{l_\alpha - (1+\cos\beta)l_\beta}{3-\cos 2\beta} \\ - -2\sin\alpha - \sin(\alpha + \beta) \\ - 2\frac{-(1+\cos\beta)l_\alpha + (3+2\cos\beta)l_\beta}{3-\cos2\beta}\\ - -\sin(\alpha+\beta) - 2\sin(\beta)\frac{(l_\alpha-l_\beta)l_\beta}{3-\cos2\beta} + 2\sin(2\beta)\frac{l_\alpha^2-2(1+\cos\beta)l_\alpha l_\beta + (3+2\cos\beta)l_\beta^2}{(3-\cos2\beta)^2} - \end{pmatrix} -\]

-

The Poincaré section here is the collection of $(β,l_β)$ when $α=0$ and $\frac{dα}{dt}>0$.

-

Hamiltonian of a double pendulum

-

Now we will plot the Hamiltonian of a double pendulum

- - -
-#Constants and setup
-using OrdinaryDiffEq
-initial2 = [0.01, 0.005, 0.01, 0.01]
-tspan2 = (0.,200.)
-
-#Define the problem
-function double_pendulum_hamiltonian(udot,u,p,t)
-    α  = u[1]
-     = u[2]
-    β  = u[3]
-     = u[4]
-    udot .=
-    [2(-(1+cos(β)))/(3-cos(2β)),
-    -2sin(α) - sin(α+β),
-    2(-(1+cos(β)) + (3+2cos(β)))/(3-cos(2β)),
-    -sin(α+β) - 2sin(β)*(((-))/(3-cos(2β))) + 2sin(2β)*((^2 - 2(1+cos(β))* + (3+2cos(β))^2)/(3-cos(2β))^2)]
-end
-
-# Construct a ContiunousCallback
-condition(u,t,integrator) = u[1]
-affect!(integrator) = nothing
-cb = ContinuousCallback(condition,affect!,nothing,
-                        save_positions = (true,false))
-
-# Construct Problem
-poincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2)
-sol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)
-
-function poincare_map(prob, u₀, p; callback=cb)
-    _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan)
-    sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)
-    scatter!(p, sol, vars=(3,4), markersize = 2)
-end
-
- - -
-poincare_map (generic function with 1 method)
-
- - - -
-p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03))
-for i in -0.01:0.00125:0.01
-    poincare_map(poincare, i, p)
-end
-plot(p,ylims=(-0.01,0.03))
-
- - - - -

Hénon-Heiles System

-

The Hénon-Heiles potential occurs when non-linear motion of a star around a galactic center with the motion restricted to a plane.

-

\[ -\begin{align} -\frac{d^2x}{dt^2}&=-\frac{\partial V}{\partial x}\\ -\frac{d^2y}{dt^2}&=-\frac{\partial V}{\partial y} -\end{align} -\]

-

where

-

\[ -V(x,y)={\frac {1}{2}}(x^{2}+y^{2})+\lambda \left(x^{2}y-{\frac {y^{3}}{3}}\right). -\]

-

We pick $\lambda=1$ in this case, so

-

\[ -V(x,y) = \frac{1}{2}(x^2+y^2+2x^2y-\frac{2}{3}y^3). -\]

-

Then the total energy of the system can be expressed by

-

\[ -E = T+V = V(x,y)+\frac{1}{2}(\dot{x}^2+\dot{y}^2). -\]

-

The total energy should conserve as this system evolves.

- - -
-using OrdinaryDiffEq, Plots
-
-#Setup
-initial = [0.,0.1,0.5,0]
-tspan = (0,100.)
-
-#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will
-#the total energy of the system.
-V(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3)
-E(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2);
-
-#Define the function
-function Hénon_Heiles(du,u,p,t)
-    x  = u[1]
-    y  = u[2]
-    dx = u[3]
-    dy = u[4]
-    du[1] = dx
-    du[2] = dy
-    du[3] = -x - 2x*y
-    du[4] = y^2 - y -x^2
-end
-
-#Pass to solvers
-prob = ODEProblem(Hénon_Heiles, initial, tspan)
-sol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16);
-
- - - - -
-# Plot the orbit
-plot(sol, vars=(1,2), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false)
-
- - - - - -
-#Optional Sanity check - what do you think this returns and why?
-@show sol.retcode
-
- - -
-sol.retcode = :Success
-
- - - -
-#Plot -
-plot(sol, vars=(1,3), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity")
-plot!(sol, vars=(2,4), leg = false)
-
- - - - - -
-#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector
-#pass it to the plotter a bit more conveniently
-energy = map(x->E(x...), sol.u)
-
-#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.
-@show ΔE = energy[1]-energy[end]
-
- - -
-ΔE = energy[1] - energy[end] = -3.099845153070602e-5
-
- - - -
-#Plot
-plot(sol.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy")
-
- - - - -

Symplectic Integration

-

To prevent energy drift, we can instead use a symplectic integrator. We can directly define and solve the SecondOrderODEProblem:

- - -
-function HH_acceleration!(dv,v,u,p,t)
-    x,y  = u
-    dx,dy = dv
-    dv[1] = -x - 2x*y
-    dv[2] = y^2 - y -x^2
-end
-initial_positions = [0.0,0.1]
-initial_velocities = [0.5,0.0]
-prob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan)
-sol2 = solve(prob, KahanLi8(), dt=1/10);
-
- - - -

Notice that we get the same results:

- - -
-# Plot the orbit
-plot(sol2, vars=(3,4), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false)
-
- - - - - -
-plot(sol2, vars=(3,1), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity")
-plot!(sol2, vars=(4,2), leg = false)
-
- - - - -

but now the energy change is essentially zero:

- - -
-energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u)
-#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.
-@show ΔE = energy[1]-energy[end]
-
- - -
-ΔE = energy[1] - energy[end] = 9.020562075079397e-15
-
- - - -
-#Plot
-plot(sol2.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy")
-
- - - - -

It's so close to zero it breaks GR! And let's try to use a Runge-Kutta-Nyström solver to solve this. Note that Runge-Kutta-Nyström isn't symplectic.

- - -
-sol3 = solve(prob, DPRKN6());
-energy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u)
-@show ΔE = energy[1]-energy[end]
-
- - -
-ΔE = energy[1] - energy[end] = -8.017994408110463e-6
-
- - - -
-gr()
-plot(sol3.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy")
-
- - - - -

Note that we are using the DPRKN6 sovler at reltol=1e-3 (the default), yet it has a smaller energy variation than Vern9 at abs_tol=1e-16, rel_tol=1e-16. Therefore, using specialized solvers to solve its particular problem is very efficient.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","classical_physics.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/conditional_dosing.html b/html/models/conditional_dosing.html deleted file mode 100644 index b2e5431a..00000000 --- a/html/models/conditional_dosing.html +++ /dev/null @@ -1,863 +0,0 @@ - - - - - - Conditional Dosing Pharmacometric Example - - - - - - - - - - - - - - - - - -
-
-
- -
-

Conditional Dosing Pharmacometric Example

-
Chris Rackauckas
- -
- -

In this example we will show how to model a conditional dosing using the DiscreteCallbacks. The problem is as follows. The patient has a drug A(t) in their system. The concentration of the drug is given as C(t)=A(t)/V for some volume constant V. At t=4, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below 4, then they will receive a new dose.

-

For our model, we will use the simple decay equation. We will write this in the in-place form to make it easy to extend to more complicated examples:

- - -
-using DifferentialEquations
-function f(du,u,p,t)
-    du[1] = -u[1]
-end
-u0 = [10.0]
-const V = 1
-prob = ODEProblem(f,u0,(0.0,10.0))
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [10.0]
-
- - -

Let's see what the solution looks like without any events.

- - -
-sol = solve(prob,Tsit5())
-using Plots; gr()
-plot(sol)
-
- - - - -

We see that at time t=4, the patient should receive a dose. Let's code up that event. We need to check at t=4 if the concentration u[1]/4 is <4, and if so, add 10 to u[1]. We do this with the following:

- - -
-condition(u,t,integrator) = t==4 && u[1]/V<4
-affect!(integrator) = integrator.u[1] += 10
-cb = DiscreteCallback(condition,affect!)
-
- - -
-DiffEqBase.DiscreteCallback{typeof(Main.WeaveSandBox14.condition),typeof(Ma
-in.WeaveSandBox14.affect!),typeof(DiffEqBase.INITIALIZE_DEFAULT)}(Main.Weav
-eSandBox14.condition, Main.WeaveSandBox14.affect!, DiffEqBase.INITIALIZE_DE
-FAULT, Bool[true, true])
-
- - -

Now we will give this callback to the solver, and tell it to stop at t=4 so that way the condition can be checked:

- - -
-sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)
-using Plots; gr()
-plot(sol)
-
- - - - -

Let's show that it actually added 10 instead of setting the value to 10. We could have set the value using affect!(integrator) = integrator.u[1] = 10

- - -
-println(sol(4.00000))
-
- - -
-[0.183164]
-
- - - -
-println(sol(4.000000000001))
-
- - -
-[10.1832]
-
- - -

Now let's model a patient whose decay rate for the drug is lower:

- - -
-function f(du,u,p,t)
-    du[1] = -u[1]/6
-end
-u0 = [10.0]
-const V = 1
-prob = ODEProblem(f,u0,(0.0,10.0))
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [10.0]
-
- - - -
-sol = solve(prob,Tsit5())
-using Plots; gr()
-plot(sol)
-
- - - - -

Under the same criteria, with the same event, this patient will not receive a second dose:

- - -
-sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)
-using Plots; gr()
-plot(sol)
-
- - - - - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","conditional_dosing.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/diffeqbio_II_networkproperties.html b/html/models/diffeqbio_II_networkproperties.html deleted file mode 100644 index bdb6c1ea..00000000 --- a/html/models/diffeqbio_II_networkproperties.html +++ /dev/null @@ -1,1429 +0,0 @@ - - - - - - DiffEqBiological Tutorial II: Network Properties API - - - - - - - - - - - - - - - - - -
-
-
- -
-

DiffEqBiological Tutorial II: Network Properties API

-
Samuel Isaacson
- -
- -

The DiffEqBiological API provides a collection of functions for easily accessing network properties, and for incrementally building and extending a network. In this tutorial we'll go through the API, and then illustrate how to programmatically construct a network.

-

We'll illustrate the API using a toggle-switch like network that contains a variety of different reaction types:

- - -
-using DifferentialEquations, DiffEqBiological, Latexify, Plots
-fmt = :svg
-pyplot(fmt=fmt)
-rn = @reaction_network begin
-    hillr(D₂,α,K,n),  --> m₁
-    hillr(D₁,α,K,n),  --> m₂
-    (δ,γ), m₁  
-    (δ,γ), m₂  
-    β, m₁ --> m₁ + P₁
-    β, m₂ --> m₂ + P₂
-    μ, P₁ --> 
-    μ, P₂ --> 
-    (k₊,k₋), 2P₁  D₁ 
-    (k₊,k₋), 2P₂  D₂
-    (k₊,k₋), P₁+P₂  T
-end α K n δ γ β μ k₊ k₋;
-
- - - -

This corresponds to the chemical reaction network given by

- - -
-latexify(rn; env=:chemical)
-
- - - - -\begin{align*} -\require{mhchem} -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\ -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\ -\ce{ m_{1} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{2} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{1} &->[\beta] m_{1} + P_{1}}\\ -\ce{ m_{2} &->[\beta] m_{2} + P_{2}}\\ -\ce{ P_{1} &->[\mu] \varnothing}\\ -\ce{ P_{2} &->[\mu] \varnothing}\\ -\ce{ 2 \cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\ -\ce{ 2 \cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\ -\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T} -\end{align*} - - -
-

Network Properties

-

Basic properties of the generated network include the speciesmap and paramsmap functions we examined in the last tutorial, along with the corresponding species and params functions:

- - -
-species(rn)
-
- - -
-7-element Array{Symbol,1}:
- :m₁
- :m₂
- :P₁
- :P₂
- :D₁
- :D₂
- :T
-
- - - -
-params(rn)
-
- - -
-9-element Array{Symbol,1}:
- :α 
- :K 
- :n 
- :δ 
- :γ 
- :β 
- :μ 
- :k₊
- :k₋
-
- - -

The numbers of species, parameters and reactions can be accessed using numspecies(rn), numparams(rn) and numreactions(rn).

-

A number of functions are available to access properties of reactions within the generated network, including substrates, products, dependents, ismassaction, substratestoich, substratesymstoich, productstoich, productsymstoich, and netstoich. Each of these functions takes two arguments, the reaction network rn and the index of the reaction to query information about. For example, to find the substrate symbols and their corresponding stoichiometries for the 11th reaction, 2P₁ --> D₁, we would use

- - -
-substratesymstoich(rn, 11)
-
- - -
-1-element Array{DiffEqBiological.ReactantStruct,1}:
- DiffEqBiological.ReactantStruct(:P₁, 2)
-
- - -

Broadcasting works on all these functions, allowing the construction of a vector holding the queried information across all reactions, i.e.

- - -
-substratesymstoich.(rn, 1:numreactions(rn))
-
- - -
-16-element Array{Array{DiffEqBiological.ReactantStruct,1},1}:
- []                                              
- []                                              
- [ReactantStruct(:m₁, 1)]                        
- []                                              
- [ReactantStruct(:m₂, 1)]                        
- []                                              
- [ReactantStruct(:m₁, 1)]                        
- [ReactantStruct(:m₂, 1)]                        
- [ReactantStruct(:P₁, 1)]                        
- [ReactantStruct(:P₂, 1)]                        
- [ReactantStruct(:P₁, 2)]                        
- [ReactantStruct(:D₁, 1)]                        
- [ReactantStruct(:P₂, 2)]                        
- [ReactantStruct(:D₂, 1)]                        
- [ReactantStruct(:P₁, 1), ReactantStruct(:P₂, 1)]
- [ReactantStruct(:T, 1)]
-
- - -

To see the net stoichiometries for all reactions we would use

- - -
-netstoich.(rn, 1:numreactions(rn))
-
- - -
-16-element Array{Array{Pair{Int64,Int64},1},1}:
- [1=>1]              
- [2=>1]              
- [1=>-1]             
- [1=>1]              
- [2=>-1]             
- [2=>1]              
- [3=>1]              
- [4=>1]              
- [3=>-1]             
- [4=>-1]             
- [3=>-2, 5=>1]       
- [3=>2, 5=>-1]       
- [4=>-2, 6=>1]       
- [4=>2, 6=>-1]       
- [3=>-1, 4=>-1, 7=>1]
- [3=>1, 4=>1, 7=>-1]
-
- - -

Here the first integer in each pair corresponds to the index of the species (with symbol species(rn)[index]). The second integer corresponds to the net stoichiometric coefficient of the species within the reaction. substratestoich and productstoich are defined similarly.

-

Several functions are also provided that calculate different types of dependency graphs. These include rxtospecies_depgraph, which provides a mapping from reaction index to the indices of species whose population changes when the reaction occurs:

- - -
-rxtospecies_depgraph(rn)
-
- - -
-16-element Array{Array{Int64,1},1}:
- [1]      
- [2]      
- [1]      
- [1]      
- [2]      
- [2]      
- [3]      
- [4]      
- [3]      
- [4]      
- [3, 5]   
- [3, 5]   
- [4, 6]   
- [4, 6]   
- [3, 4, 7]
- [3, 4, 7]
-
- - -

Here the last row indicates that the species with indices [3,4,7] will change values when the reaction T --> P₁ + P₂ occurs. To confirm these are the correct species we can look at

- - -
-species(rn)[[3,4,7]]
-
- - -
-3-element Array{Symbol,1}:
- :P₁
- :P₂
- :T
-
- - -

The speciestorx_depgraph similarly provides a mapping from species to reactions for which their rate laws depend on that species. These correspond to all reactions for which the given species is in the dependent set of the reaction. We can verify this for the first species, m₁:

- - -
-speciestorx_depgraph(rn)[1]
-
- - -
-2-element Array{Int64,1}:
- 3
- 7
-
- - - -
-findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn)))
-
- - -
-2-element Array{Int64,1}:
- 3
- 7
-
- - -

Finally, rxtorx_depgraph provides a mapping that shows when a given reaction occurs, which other reactions have rate laws that involve species whose value would have changed:

- - -
-rxtorx_depgraph(rn)
-
- - -
-16-element Array{Array{Int64,1},1}:
- [1, 3, 7]              
- [2, 5, 8]              
- [3, 7]                 
- [3, 4, 7]              
- [5, 8]                 
- [5, 6, 8]              
- [7, 9, 11, 15]         
- [8, 10, 13, 15]        
- [9, 11, 15]            
- [10, 13, 15]           
- [2, 9, 11, 12, 15]     
- [2, 9, 11, 12, 15]     
- [1, 10, 13, 14, 15]    
- [1, 10, 13, 14, 15]    
- [9, 10, 11, 13, 15, 16]
- [9, 10, 11, 13, 15, 16]
-
- - -

Note on Using Network Property API Functions

-

Many basic network query and reaction property functions are simply accessors, returning information that is already stored within the generated reaction_network. For these functions, modifying the returned data structures may lead to inconsistent internal state within the network. As such, they should be used for accessing, but not modifying, network properties. The API documentation indicates which functions return newly allocated data structures and which return data stored within the reaction_network.

-
-

Incremental Construction of Networks

-

The @reaction_network macro is monolithic, in that it not only constructs and stores basic network properties such as the reaction stoichiometries, but also generates everything needed to immediately solve ODE, SDE and jump models using the network. This includes Jacobian functions, noise functions, and jump functions for each reaction. While this allows for a compact interface to the DifferentialEquations.jl solvers, it can also be computationally expensive for large networks, where a user may only wish to solve one type of problem and/or have fine-grained control over what is generated. In addition, some types of reaction network structures are more amenable to being constructed programmatically, as opposed to writing out all reactions by hand within one macro. For these reasons DiffEqBiological provides two additional macros that only initially setup basic reaction network properties, and which can be extended through a programmatic interface: @min_reaction_network and @empty_reaction_network. We now give an introduction to constructing these more minimal network representations, and how they can be programmatically extended. See also the relevant API section.

-

The @min_reaction_network macro works identically to the @reaction_network macro, but the generated network will only be complete with respect to its representation of chemical network properties (i.e. species, parameters and reactions). No ODE, SDE or jump models are generated during the macro call. It can subsequently be extended with the addition of new species, parameters or reactions. The @empty_reaction_network allocates an empty network structure that can also be extended using the programmatic interface. For example, consider a partial version of the toggle-switch like network we defined above:

- - -
-rnmin = @min_reaction_network begin
-    (δ,γ), m₁  
-    (δ,γ), m₂  
-    β, m₁ --> m₁ + P₁
-    β, m₂ --> m₂ + P₂
-    μ, P₁ --> 
-    μ, P₂ --> 
-end δ γ β μ;
-
- - - -

Here we have left out the first two, and last three, reactions from the original reaction_network. To expand the network until it is functionally equivalent to the original model we add back in the missing species, parameters, and finally the missing reactions. Note, it is required that species and parameters be defined before any reactions using them are added. The necessary network extension functions are given by addspecies!, addparam! and addreaction!, and described in the API. To complete rnmin we first add the relevant species:

- - -
-addspecies!(rnmin, :D₁)
-addspecies!(rnmin, :D₂)
-addspecies!(rnmin, :T)
-
- - - -

Next we add the needed parameters

- - -
-addparam!(rnmin, )
-addparam!(rnmin, :K)
-addparam!(rnmin, :n)
-addparam!(rnmin, :k₊)
-addparam!(rnmin, :k₋)
-
- - - -

Note, both addspecies! and addparam! also accept strings encoding the variable names (which are then converted to Symbols internally).

-

We are now ready to add the missing reactions. The API provides two forms of the addreaction! function, one takes expressions analogous to what one would write in the macro:

- - -
-addreaction!(rnmin, :(hillr(D₁,α,K,n)), :( --> m₂))
-addreaction!(rnmin, :((k₊,k₋)), :(2P₂  D₂))
-addreaction!(rnmin, :k₊, :(2P₁ --> D₁))
-addreaction!(rnmin, :k₋, :(D₁ --> 2P₁))
-
- - - -

The rate can be an expression or symbol as above, but can also just be a numeric value. The second form of addreaction! takes tuples of Pair{Symbol,Int} that encode the stoichiometric coefficients of substrates and reactants:

- - -
-# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich)
-addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,))
-addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,))
-addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1))
-
- - - -

Let's check that rn and rnmin have the same set of species:

- - -
-setdiff(species(rn), species(rnmin))
-
- - -
-0-element Array{Symbol,1}
-
- - -

the same set of params:

- - -
-setdiff(params(rn), params(rnmin))
-
- - -
-0-element Array{Symbol,1}
-
- - -

and the final reaction has the same substrates, reactions, and rate expression:

- - -
-rxidx = numreactions(rn)
-setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx))
-
- - -
-0-element Array{Symbol,1}
-
- - - -
-setdiff(products(rn, rxidx), products(rnmin, rxidx))
-
- - -
-0-element Array{Symbol,1}
-
- - - -
-rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx)
-
- - -
-true
-
- - -
-

Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps

-

Once a network generated from @min_reaction_network or @empty_reaction_network has had all the associated species, parameters and reactions filled in, corresponding ODE, SDE or jump models can be constructed. The relevant API functions are addodes!, addsdes! and addjumps!. One benefit to contructing models with these functions is that they offer more fine-grained control over what actually gets constructed. For example, addodes! has the optional keyword argument, build_jac, which if set to false will disable construction of symbolic Jacobians and functions for evaluating Jacobians. For large networks this can give a significant speed-up in the time required for constructing an ODE model. Each function and its associated keyword arguments are described in the API section, Functions to add ODEs, SDEs or Jumps to a Network.

-

Let's extend rnmin to include the needed functions for use in ODE solvers:

- - -
-addodes!(rnmin)
-
- - - -

The Generated Functions for Models section of the API shows what functions have been generated. For ODEs these include oderhsfun(rnmin), which returns a function of the form f(du,u,p,t) which evaluates the ODEs (i.e. the time derivatives of u) within du. For each generated function, the corresponding expressions from which it was generated can be retrieved using accessors from the Generated Expressions section of the API. The equations within du can be retrieved using the odeexprs(rnmin) function. For example:

- - -
-odeexprs(rnmin)
-
- - -
-7-element Array{Union{Float64, Int64, Expr, Symbol},1}:
- :((-(δ * m₁) + γ) + (α * K ^ n) / (K ^ n + D₂ ^ n))                       
-               
- :((-(δ * m₂) + γ) + (α * K ^ n) / (K ^ n + D₁ ^ n))                       
-               
- :(((((β * m₁ - μ * P₁) + -2 * k₊ * (P₁ ^ 2 / 2)) + 2 * k₋ * D₁) - k₊ * P₁ 
-* P₂) + k₋ * T)
- :(((((β * m₂ - μ * P₂) + -2 * k₊ * (P₂ ^ 2 / 2)) + 2 * k₋ * D₂) - k₊ * P₁ 
-* P₂) + k₋ * T)
- :(k₊ * (P₁ ^ 2 / 2) - k₋ * D₁)                                            
-               
- :(k₊ * (P₂ ^ 2 / 2) - k₋ * D₂)                                            
-               
- :(k₊ * P₁ * P₂ - k₋ * T)
-
- - -

Using Latexify we can see the ODEs themselves to compare with these expressions:

- - -
-latexify(rnmin)
-
- - - - -\begin{align*} -\frac{dm_1}{dt} =& - \delta \cdot m_1 + \gamma + \frac{\alpha \cdot K^{n}}{K^{n} + D_2^{n}} \\ -\frac{dm_2}{dt} =& - \delta \cdot m_2 + \gamma + \frac{\alpha \cdot K^{n}}{K^{n} + D_1^{n}} \\ -\frac{dP_1}{dt} =& \beta \cdot m_1 - \mu \cdot P_1 -2 \cdot k_+ \cdot \frac{P_1^{2}}{2} + 2 \cdot k_- \cdot D_1 - k_+ \cdot P_1 \cdot P_2 + k_- \cdot T \\ -\frac{dP_2}{dt} =& \beta \cdot m_2 - \mu \cdot P_2 -2 \cdot k_+ \cdot \frac{P_2^{2}}{2} + 2 \cdot k_- \cdot D_2 - k_+ \cdot P_1 \cdot P_2 + k_- \cdot T \\ -\frac{dD_1}{dt} =& k_+ \cdot \frac{P_1^{2}}{2} - k_- \cdot D_1 \\ -\frac{dD_2}{dt} =& k_+ \cdot \frac{P_2^{2}}{2} - k_- \cdot D_2 \\ -\frac{dT}{dt} =& k_+ \cdot P_1 \cdot P_2 - k_- \cdot T \\ -\end{align*} - - -

For ODEs two other functions are generated by addodes!. jacfun(rnmin) will return the generated Jacobian evaluation function, fjac(dJ,u,p,t), which given the current solution u evaluates the Jacobian within dJ. jacobianexprs(rnmin) gives the corresponding matrix of expressions, which can be used with Latexify to see the Jacobian:

- - -
-latexify(jacobianexprs(rnmin))
-
- - - - -\begin{equation*} -\left[ -\begin{array}{ccccccc} - - \delta & 0 & 0 & 0 & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot D_2^{-1 + n}}{\left( K^{n} + D_2^{n} \right)^{2}} & 0 \\ -0 & - \delta & 0 & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot D_1^{-1 + n}}{\left( K^{n} + D_1^{n} \right)^{2}} & 0 & 0 \\ -\beta & 0 & - \mu - 2 \cdot k_+ \cdot P_1 - k_+ \cdot P_2 & - k_+ \cdot P_1 & 2 \cdot k_- & 0 & k_{-} \\ -0 & \beta & - k_+ \cdot P_2 & - \mu - k_+ \cdot P_1 - 2 \cdot k_+ \cdot P_2 & 0 & 2 \cdot k_- & k_{-} \\ -0 & 0 & k_+ \cdot P_1 & 0 & - k_- & 0 & 0 \\ -0 & 0 & 0 & k_+ \cdot P_2 & 0 & - k_- & 0 \\ -0 & 0 & k_+ \cdot P_2 & k_+ \cdot P_1 & 0 & 0 & - k_- \\ -\end{array} -\right] -\end{equation*} - - -

addodes! also generates a function that evaluates the Jacobian of the ODE derivative functions with respect to the parameters. paramjacfun(rnmin) then returns the generated function. It has the form fpjac(dPJ,u,p,t), which given the current solution u evaluates the Jacobian matrix with respect to parameters p within dPJ. For use in DifferentialEquations.jl solvers, an ODEFunction representation of the ODEs is available from odefun(rnmin).

-

addsdes! and addjumps! work similarly to complete the network for use in StochasticDiffEq and DiffEqJump solvers.

-

Note on Using Generated Function and Expression API Functions

-

The generated functions and expressions accessible through the API require first calling the appropriate addodes!, addsdes or addjumps function. These are responsible for actually constructing the underlying functions and expressions. The API accessors simply return already constructed functions and expressions that are stored within the reaction_network structure.

-
-

Example of Generating a Network Programmatically

-

For a user directly typing in a reaction network, it is generally easier to use the @min_reaction_network or @reaction_network macros to fully specify reactions. However, for large, structured networks it can be much easier to generate the network programmatically. For very large networks, with tens of thousands of reactions, the form of addreaction! that uses stoichiometric coefficients should be preferred as it offers substantially better performance. To put together everything we've seen, let's generate the network corresponding to a 1D continuous time random walk, approximating the diffusion of molecules within an interval.

-

The basic "reaction" network we wish to study is

-

\[ -u_1 \leftrightarrows u_2 \leftrightarrows u_3 \cdots \leftrightarrows u_{N} -\]

-

for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll assume the rate molecules hop from their current site to any particular neighbor is just $h^{-2}$. We can interpret this hopping process as a collection of $2N-2$ "reactions", with the form $u_i \to u_j$ for $j=i+1$ or $j=i-1$. We construct the corresponding reaction network as follows. First we set values for the basic parameters:

- - -
-N = 64
-h = 1 / N
-
- - -
-0.015625
-
- - -

then we create an empty network, and add each species

- - -
-rn = @empty_reaction_network
-
-for i = 1:N
-    addspecies!(rn, Symbol(:u, i))
-end
-
- - - -

We next add one parameter β, which we will set equal to the hopping rate of molecules, $h^{-2}$:

- - -
-addparam!(rn, )
-
- - - -

Finally, we add in the $2N-2$ possible hopping reactions:

- - -
-for i = 1:N
-    (i < N) && addreaction!(rn, , (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,))
-    (i > 1) && addreaction!(rn, , (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,))
-end
-
- - - -

Let's first construct an ODE model for the network

- - -
-addodes!(rn)
-
- - - -

We now need to specify the initial condition, parameter vector and time interval to solve on. We start with 10000 molecules placed at the center of the domain, and setup an ODEProblem to solve:

- - -
-u₀ = zeros(N)
-u₀[div(N,2)] = 10000
-p = [1/(h*h)]
-tspan = (0.,.01)
-oprob = ODEProblem(rn, u₀, tspan, p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 0.01)
-u0: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0  …  0.0, 0.0, 0.0, 0.
-0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
-
- - -

We are now ready to solve the problem and plot the solution. Since we have essentially generated a method of lines discretization of the diffusion equation with a discontinuous initial condition, we'll use an A-L stable implicit ODE solver, KenCarp4, and plot the solution at a few times:

- - -
-sol = solve(oprob, KenCarp4())
-times = [0., .0001, .001, .01]
-plt = plot()
-for time in times
-    plot!(plt, 1:N, sol(time), fmt=fmt, xlabel="i", ylabel="uᵢ", label=string("t = ", time), lw=3)
-end
-plot(plt, ylims=(0.,10000.))
-
- - - - -

Here we see the characteristic diffusion of molecules from the center of the domain, resulting in a shortening and widening of the solution as $t$ increases.

-

Let's now look at a stochastic chemical kinetics jump process version of the model, where β gives the probability per time each molecule can hop from its current lattice site to an individual neighboring site. We first add in the jumps, disabling regular_jumps since they are not needed, and using the minimal_jumps flag to construct a minimal representation of the needed jumps. We then construct a JumpProblem, and use the Composition-Rejection Direct method, DirectCR, to simulate the process of the molecules hopping about on the lattice:

- - -
-addjumps!(rn, build_regular_jumps=false, minimal_jumps=true)
-
-# make the initial condition integer valued 
-u₀ = zeros(Int, N)
-u₀[div(N,2)] = 10000
-
-# setup and solve the problem
-dprob = DiscreteProblem(rn, u₀, tspan, p)
-jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))
-jsol = solve(jprob, SSAStepper(), saveat=times)
-
- - -
-retcode: Default
-Interpolation: Piecewise constant interpolation
-t: 4-element Array{Float64,1}:
- 0.0   
- 0.0001
- 0.001 
- 0.01  
-u: 4-element Array{Array{Int64,1},1}:
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
- [2, 3, 1, 7, 12, 5, 16, 10, 12, 20  …  9, 17, 8, 10, 5, 4, 4, 2, 0, 0]
-
- - -

We can now plot bar graphs showing the locations of the molecules at the same set of times we examined the ODE solution. For comparison, we also plot the corresponding ODE solutions (red lines) that we found:

- - -
-times = [0., .0001, .001, .01]
-plts = []
-for i = 1:4
-    b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i]))
-    plot!(b,sol(times[i]))
-    push!(plts,b)
-end
-plot(plts...)
-
- - - - -

Similar to the ODE solutions, we see that the molecules spread out and become more and more well-mixed throughout the domain as $t$ increases. The simulation results are noisy due to the finite numbers of molecules present in the stochsatic simulation, but since the number of molecules is large they agree well with the ODE solution at each time.

-
-

Getting Help

-

Have a question related to DiffEqBiological or this tutorial? Feel free to ask in the DifferentialEquations.jl Gitter. If you think you've found a bug in DiffEqBiological, or would like to request/discuss new functionality, feel free to open an issue on Github (but please check there is no related issue already open). If you've found a bug in this tutorial, or have a suggestion, feel free to open an issue on the DiffEqTutorials Github site. Or, submit a pull request to DiffEqTutorials updating the tutorial!

-
- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","diffeqbio_II_networkproperties.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: macOS (x86_64-apple-darwin14.5.0)
-  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[28f2ccd6-bb30-5033-b560-165f7b14dc2f] ApproxFun 0.10.3
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.8
-[c52e3926-4ff0-5f6e-af25-54175e0327b1] Atom 0.7.15
-[aae01518-5342-5314-be14-df237901396f] BandedMatrices 0.8.2
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[ad839575-38b3-5650-b840-f874b8c74a25] Blink 0.10.1
-[336ed68f-0bac-5ca0-87d4-7b16caf5d00b] CSV 0.4.3
-[5d742f6a-9f54-50ce-8119-2520741973ca] CSVFiles 0.14.0
-[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.17.1
-[864edb3b-99cc-5e75-8d2d-829cb0a9cfe8] DataStructures 0.15.0
-[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 5.5.1
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.7.1+
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.2
-[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.1.1
-[9fdde737-9c7f-55bf-ade8-46b3f136cc48] DiffEqOperators 3.4.0
-[34035eb4-37db-58ae-b003-a3202c898701] DiffEqPDEBase 0.4.0
-[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[aaf54ef3-cdf8-58ed-94cc-d582ad619b94] DistributedArrays 0.6.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.16.4
-[e30172f5-a6a5-5a46-863b-614d45cd2de4] Documenter 0.21.5
-[5789e2e9-d7fb-5bc7-8068-2c6fae9b9549] FileIO 1.0.5
-[069b7b12-0de2-55c6-9aab-29f3d0a68a2e] FunctionWrappers 1.0.0
-[28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71] GR 0.38.1
-[14197337-ba66-59df-a3e3-ca00e7dcff7a] GenericLinearAlgebra 0.1.0
-[19dc6840-f33b-545b-b366-655c7e3ffd49] HCubature 1.3.0
-[f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f] HDF5 0.11.0
-[09f84164-cd44-5f33-b23f-e6b0d136a0d5] HypothesisTests 0.8.0
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[30d91d44-8115-11e8-1d28-c19a5ac16de8] JuAFEM 0.2.0
-[f80590ac-b429-510a-8a99-e7c46989f22d] JuliaFEM 0.5.0
-[e5e0dc1b-0480-54bc-9374-aad01c23163d] Juno 0.5.5
-[b964fa9f-0449-5b57-a5c2-d3ea65f4040f] LaTeXStrings 1.0.3
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.7.0
-[5078a376-72f3-5289-bfd5-ec5146d43c02] LazyArrays 0.6.0
-[23992714-dd62-5051-b70f-ba57cb901cac] MAT 0.5.0
-[1914dd2f-81c6-5fcd-8719-6d5c9610ff09] MacroTools 0.4.5
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.1.0+
-[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.2.1
-[5fb14364-9ced-5910-84b2-373655c76a03] OhMyREPL 0.5.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[3b7a836e-365b-5785-a47d-02c71176b4aa] PGFPlots 3.0.3
-[9b87118b-4619-50d2-8e1e-99f35a4d4d9d] PackageCompiler 0.6.3
-[d96e819e-fc66-5662-9728-84c9c7592b0a] Parameters 0.10.3
-[58dd65bb-95f3-509e-9936-c39a10fdeae7] Plotly 0.2.0
-[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.12.3
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[27ebfcd6-29c5-5fa9-bf4b-fb8fc14df3ae] Primes 0.4.0
-[c46f51b8-102a-5cf2-8d2c-8597cb0e0da7] ProfileView 0.4.0
-[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.18.5
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.7.0
-[1fd47b50-473d-5c70-9696-f719f8f3bcdc] QuadGK 2.0.3
-[e6cf234a-135c-5ec9-84dd-332b85af5143] RandomNumbers 1.2.0
-[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.3
-[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 1.1.0
-[c4c386cf-5103-5370-be45-f3a111cca3b8] Rsvg 0.2.3
-[276daf66-3868-5448-9aa4-cd146d93841b] SpecialFunctions 0.7.2
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91] StatsBase 0.29.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.10.2
-[9672c7b4-1e72-59bd-8a11-6ac3964bc41f] SteadyStateDiffEq 1.4.0
-[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.1.1
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.2.0
-[123dc426-2d89-5057-bbad-38513e3affd8] SymEngine 0.5.0
-[e0df1984-e451-5cb5-8b61-797a481e67e3] TextParse 0.7.5
-[a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f] TimerOutputs 0.5.0
-[37b6cedf-1f77-55f8-9503-c64b63398394] Traceur 0.3.0
-[39424ebd-4cf3-5550-a685-96706a953f40] TreeView 0.3.1
-[b8865327-cd53-5732-bb35-84acbb429228] UnicodePlots 1.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[2a06ce6d-1589-592b-9c33-f37faeaed826] UnitfulPlots 0.0.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[0f1e0344-ec1d-5b48-a673-e5cf874b6c29] WebIO 0.7.0
-[9abbd945-dff8-562f-b5e8-e1ebf5ef1b79] Profile
-[2f01184e-e22b-5df5-ae63-d93ebab69eaf] SparseArrays
-
- - - -
- - - -
-
-
- - diff --git a/html/models/diffeqbio_I_introduction.html b/html/models/diffeqbio_I_introduction.html deleted file mode 100644 index e7a9b6be..00000000 --- a/html/models/diffeqbio_I_introduction.html +++ /dev/null @@ -1,1063 +0,0 @@ - - - - - - DiffEqBiological Tutorial I: Introduction - - - - - - - - - - - - - - - - - -
-
-
- -
-

DiffEqBiological Tutorial I: Introduction

-
Samuel Isaacson
- -
- -

DiffEqBiological.jl is a domain specific language (DSL) for writing chemical reaction networks in Julia. The generated chemical reaction network model can then be translated into a variety of mathematical models which can be solved using components of the broader DifferentialEquations.jl ecosystem.

-

In this tutorial we'll provide an introduction to using DiffEqBiological to specify chemical reaction networks, and then to solve ODE, jump, tau-leaping and SDE models generated from them. Let's start by using the DiffEqBiological reaction_network macro to specify a simply chemical reaction network; the well-known Repressilator.

-

We first import the basic packages we'll need, and use Plots.jl for making figures:

- - -
-# If not already installed, first hit "]" within a Julia REPL. Then type:
-# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify 
-
-using DifferentialEquations, DiffEqBiological, Plots, Latexify
-pyplot(fmt=:svg);
-
- - - -

We now construct the reaction network. The basic types of arrows and predefined rate laws one can use are discussed in detail within the DiffEqBiological Chemical Reaction Models documentation. Here we use a mix of first order, zero order and repressive Hill function rate laws. Note, $\varnothing$ corresponds to the empty state, and is used for zeroth order production and first order degradation reactions:

- - -
-repressilator = @reaction_network begin
-    hillr(P₃,α,K,n),  --> m₁
-    hillr(P₁,α,K,n),  --> m₂
-    hillr(P₂,α,K,n),  --> m₃
-    (δ,γ), m₁  
-    (δ,γ), m₂  
-    (δ,γ), m₃  
-    β, m₁ --> m₁ + P₁
-    β, m₂ --> m₂ + P₂
-    β, m₃ --> m₃ + P₃
-    μ, P₁ --> 
-    μ, P₂ --> 
-    μ, P₃ --> 
-end α K n δ γ β μ;
-
- - - -

We can use Latexify to look at the corresponding reactions and understand the generated rate laws for each reaction

- - -
-latexify(repressilator; env=:chemical)
-
- - - - -\begin{align*} -\require{mhchem} -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + P_3^{n}}] m_{1}}\\ -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + P_1^{n}}] m_{2}}\\ -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + P_2^{n}}] m_{3}}\\ -\ce{ m_{1} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{2} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{3} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{1} &->[\beta] m_{1} + P_{1}}\\ -\ce{ m_{2} &->[\beta] m_{2} + P_{2}}\\ -\ce{ m_{3} &->[\beta] m_{3} + P_{3}}\\ -\ce{ P_{1} &->[\mu] \varnothing}\\ -\ce{ P_{2} &->[\mu] \varnothing}\\ -\ce{ P_{3} &->[\mu] \varnothing} -\end{align*} - - -

We can also use Latexify to look at the corresponding ODE model for the chemical system

- - -
-latexify(repressilator)
-
- - - - -\begin{align*} -\frac{dm_1}{dt} =& \frac{\alpha \cdot K^{n}}{K^{n} + P_3^{n}} - \delta \cdot m_1 + \gamma \\ -\frac{dm_2}{dt} =& \frac{\alpha \cdot K^{n}}{K^{n} + P_1^{n}} - \delta \cdot m_2 + \gamma \\ -\frac{dm_3}{dt} =& \frac{\alpha \cdot K^{n}}{K^{n} + P_2^{n}} - \delta \cdot m_3 + \gamma \\ -\frac{dP_1}{dt} =& \beta \cdot m_1 - \mu \cdot P_1 \\ -\frac{dP_2}{dt} =& \beta \cdot m_2 - \mu \cdot P_2 \\ -\frac{dP_3}{dt} =& \beta \cdot m_3 - \mu \cdot P_3 \\ -\end{align*} - - -

To solve the ODEs we need to specify the values of the parameters in the model, the initial condition, and the time interval to solve the model on. To do this it helps to know the orderings of the parameters and the species. Parameters are ordered in the same order they appear after the end statement in the @reaction_network macro. Species are ordered in the order they first appear within the @reaction_network macro. We can see these orderings using the speciesmap and paramsmap functions:

- - -
-speciesmap(repressilator)
-
- - -
-OrderedCollections.OrderedDict{Symbol,Int64} with 6 entries:
-  :m₁ => 1
-  :m₂ => 2
-  :m₃ => 3
-  :P₁ => 4
-  :P₂ => 5
-  :P₃ => 6
-
- - - -
-paramsmap(repressilator)
-
- - -
-OrderedCollections.OrderedDict{Symbol,Int64} with 7 entries:
-  :α => 1
-  :K => 2
-  :n => 3
-  :δ => 4
-  :γ => 5
-  :β => 6
-  :μ => 7
-
- - -

Solving the ODEs:

-

Knowing these orderings, we can create parameter and initial condition vectors, and setup the ODEProblem we want to solve:

- - -
-# parameters [α,K,n,δ,γ,β,μ]
-p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60)
-
-# initial condition [m₁,m₂,m₃,P₁,P₂,P₃]
-u₀ = [0.,0.,0.,20.,0.,0.]
-
-# time interval to solve on
-tspan = (0., 10000.)
-
-# create the ODEProblem we want to solve
-oprob = ODEProblem(repressilator, u₀, tspan, p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10000.0)
-u0: [0.0, 0.0, 0.0, 20.0, 0.0, 0.0]
-
- - -

At this point we are all set to solve the ODEs. We can now use any ODE solver from within the DiffEq package. We'll just use the default DifferentialEquations solver for now, and then plot the solutions:

- - -
-sol = solve(oprob, saveat=10.)
-plot(sol, fmt=:svg)
-
- - - - -

We see the well-known oscillatory behavior of the repressilator! For more on choices of ODE solvers, see the JuliaDiffEq documentation.

-
-

Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kinetics

-

Let's now look at a stochastic chemical kinetics model of the repressilator, modeling it with jump processes. Here we will construct a DiffEqJump JumpProblem that uses Gillespie's Direct method, and then solve it to generate one realization of the jump process:

- - -
-# first we redefine the initial condition to be integer valued
-u₀ = [0,0,0,20,0,0]
-
-# next we create a discrete problem to encode that our species are integer valued:
-dprob = DiscreteProblem(repressilator, u₀, tspan, p)
-
-# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver:
-jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false))
-
-# now let's solve and plot the jump process:
-sol = solve(jprob, SSAStepper(), saveat=10.)
-plot(sol, fmt=:svg)
-
- - - - -

Here we see that oscillations remain, but become much noiser. Note, in constructing the JumpProblem we could have used any of the SSAs that are part of DiffEqJump instead of the Direct method, see the list of SSAs (i.e. constant rate jump aggregators) in the documentation.

-
-

$\tau$-leaping Methods:

-

While SSAs generate exact realizations for stochastic chemical kinetics jump process models, $\tau$-leaping methods offer a performant alternative by discretizing in time the underlying time-change representation of the stochastic process. The DiffEqJump package has limited support for $\tau$-leaping methods in the form of the basic Euler's method type approximation proposed by Gillespie. We can simulate a $\tau$-leap approximation to the repressilator by using the RegularJump representation of the network to construct a JumpProblem:

- - -
-rjs = regularjumps(repressilator)
-lprob = JumpProblem(dprob, Direct(), rjs)
-lsol = solve(lprob, SimpleTauLeaping(), dt=.1)
-plot(lsol, plotdensity=1000, fmt=:svg)
-
- - - - -
-

Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models:

-

At an intermediary physical scale between macroscopic ODE models and microscopic stochastic chemical kinetic models lies the CLE, a SDE version of the model. The SDEs add to each ODE above a noise term. As the repressilator has species that get very close to zero in size, it is not a good candidate to model with the CLE (where solutions can then go negative and become unphysical). Let's create a simpler reaction network for a birth-death process that will stay non-negative:

- - -
-bdp = @reaction_network begin
-  c₁, X --> 2X
-  c₂, X --> 0
-  c₃, 0 --> X
-end c₁ c₂ c₃
-p = (1.0,2.0,50.)
-u₀ = [5.]
-tspan = (0.,4.);
-
- - - -

The corresponding Chemical Langevin Equation SDE is then

-

\[ -dX_t = \left(c_1 X - c_2 X + c_3 \right) dt + \left( \sqrt{c_1 X} - \sqrt{c_2 X} + \sqrt{c_3} \right)dW_t, -\]

-

where $W_t$ denotes a standard Brownian Motion. We can solve the CLE SDE model by creating an SDEProblem and solving it similar to what we did for ODEs above:

- - -
-# SDEProblem for CLE
-sprob = SDEProblem(bdp, u₀, tspan, p)
-
-# solve and plot, tstops is used to specify enough points 
-# that the plot looks well-resolved
-sol = solve(sprob, tstops=range(0., step=4e-3, length=1001))
-plot(sol, fmt=:svg)
-
- - - - -

We again have complete freedom to select any of the StochasticDifferentialEquations.jl SDE solvers, see the documentation.

-
-

What information can be queried from the reaction_network:

-

The generated reaction_network contains a lot of basic information. For example

-
    -
  • f=oderhsfun(repressilator) is a function f(du,u,p,t) that given the current state vector u and time t fills du with the time derivatives of u (i.e. the right hand side of the ODEs).

    -
  • -
  • jac=jacfun(repressilator) is a function jac(J,u,p,t) that evaluates and returns the Jacobian of the ODEs in J. A corresponding Jacobian matrix of expressions can be accessed using the jacobianexprs function:

    -
  • -
- - -
-latexify(jacobianexprs(repressilator))
-
- - - - -\begin{equation*} -\left[ -\begin{array}{cccccc} - - \delta & 0 & 0 & 0 & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot P_3^{-1 + n}}{\left( K^{n} + P_3^{n} \right)^{2}} \\ -0 & - \delta & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot P_1^{-1 + n}}{\left( K^{n} + P_1^{n} \right)^{2}} & 0 & 0 \\ -0 & 0 & - \delta & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot P_2^{-1 + n}}{\left( K^{n} + P_2^{n} \right)^{2}} & 0 \\ -\beta & 0 & 0 & - \mu & 0 & 0 \\ -0 & \beta & 0 & 0 & - \mu & 0 \\ -0 & 0 & \beta & 0 & 0 & - \mu \\ -\end{array} -\right] -\end{equation*} - - -
    -
  • pjac = paramjacfun(repressilator) is a function pjac(pJ,u,p,t) that evaluates and returns the Jacobian, pJ, of the ODEs with respect to the parameters. This allows reaction_networks to be used in the DifferentialEquations.jl local sensitivity analysis package DiffEqSensitivity.

    -
  • -
-

By default, generated ODEProblems will be passed the corresponding Jacobian function, which will then be used within implicit ODE/SDE methods.

-

The DiffEqBiological API documentation provides a thorough description of the many query functions that are provided to access network properties and generated functions. In DiffEqBiological Tutorial II we'll explore the API.

-
-

Getting Help

-

Have a question related to DiffEqBiological or this tutorial? Feel free to ask in the DifferentialEquations.jl Gitter. If you think you've found a bug in DiffEqBiological, or would like to request/discuss new functionality, feel free to open an issue on Github (but please check there is no related issue already open). If you've found a bug in this tutorial, or have a suggestion, feel free to open an issue on the DiffEqTutorials Github site. Or, submit a pull request to DiffEqTutorials updating the tutorial!

-
- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","diffeqbio_I_introduction.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: macOS (x86_64-apple-darwin14.5.0)
-  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[28f2ccd6-bb30-5033-b560-165f7b14dc2f] ApproxFun 0.10.3
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.8
-[c52e3926-4ff0-5f6e-af25-54175e0327b1] Atom 0.7.15
-[aae01518-5342-5314-be14-df237901396f] BandedMatrices 0.8.2
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[ad839575-38b3-5650-b840-f874b8c74a25] Blink 0.10.1
-[336ed68f-0bac-5ca0-87d4-7b16caf5d00b] CSV 0.4.3
-[5d742f6a-9f54-50ce-8119-2520741973ca] CSVFiles 0.14.0
-[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.17.1
-[864edb3b-99cc-5e75-8d2d-829cb0a9cfe8] DataStructures 0.15.0
-[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 5.5.1
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.7.1
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.2
-[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.1.1
-[9fdde737-9c7f-55bf-ade8-46b3f136cc48] DiffEqOperators 3.4.0
-[34035eb4-37db-58ae-b003-a3202c898701] DiffEqPDEBase 0.4.0
-[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[aaf54ef3-cdf8-58ed-94cc-d582ad619b94] DistributedArrays 0.6.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.16.4
-[e30172f5-a6a5-5a46-863b-614d45cd2de4] Documenter 0.21.5
-[5789e2e9-d7fb-5bc7-8068-2c6fae9b9549] FileIO 1.0.5
-[069b7b12-0de2-55c6-9aab-29f3d0a68a2e] FunctionWrappers 1.0.0
-[28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71] GR 0.38.1
-[14197337-ba66-59df-a3e3-ca00e7dcff7a] GenericLinearAlgebra 0.1.0
-[19dc6840-f33b-545b-b366-655c7e3ffd49] HCubature 1.3.0
-[f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f] HDF5 0.11.0
-[09f84164-cd44-5f33-b23f-e6b0d136a0d5] HypothesisTests 0.8.0
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[30d91d44-8115-11e8-1d28-c19a5ac16de8] JuAFEM 0.2.0
-[f80590ac-b429-510a-8a99-e7c46989f22d] JuliaFEM 0.5.0
-[e5e0dc1b-0480-54bc-9374-aad01c23163d] Juno 0.5.5
-[b964fa9f-0449-5b57-a5c2-d3ea65f4040f] LaTeXStrings 1.0.3
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.7.0
-[5078a376-72f3-5289-bfd5-ec5146d43c02] LazyArrays 0.6.0
-[23992714-dd62-5051-b70f-ba57cb901cac] MAT 0.5.0
-[1914dd2f-81c6-5fcd-8719-6d5c9610ff09] MacroTools 0.4.5
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.1.0+
-[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.2.1
-[5fb14364-9ced-5910-84b2-373655c76a03] OhMyREPL 0.5.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[3b7a836e-365b-5785-a47d-02c71176b4aa] PGFPlots 3.0.3
-[9b87118b-4619-50d2-8e1e-99f35a4d4d9d] PackageCompiler 0.6.3
-[d96e819e-fc66-5662-9728-84c9c7592b0a] Parameters 0.10.3
-[58dd65bb-95f3-509e-9936-c39a10fdeae7] Plotly 0.2.0
-[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.12.3
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.2
-[27ebfcd6-29c5-5fa9-bf4b-fb8fc14df3ae] Primes 0.4.0
-[c46f51b8-102a-5cf2-8d2c-8597cb0e0da7] ProfileView 0.4.0
-[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.18.5
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.7.0
-[1fd47b50-473d-5c70-9696-f719f8f3bcdc] QuadGK 2.0.3
-[e6cf234a-135c-5ec9-84dd-332b85af5143] RandomNumbers 1.2.0
-[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.3
-[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 1.1.0
-[c4c386cf-5103-5370-be45-f3a111cca3b8] Rsvg 0.2.3
-[276daf66-3868-5448-9aa4-cd146d93841b] SpecialFunctions 0.7.2
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91] StatsBase 0.29.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.10.2
-[9672c7b4-1e72-59bd-8a11-6ac3964bc41f] SteadyStateDiffEq 1.4.0
-[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.1.1
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.2.0
-[123dc426-2d89-5057-bbad-38513e3affd8] SymEngine 0.5.0
-[e0df1984-e451-5cb5-8b61-797a481e67e3] TextParse 0.7.5
-[a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f] TimerOutputs 0.5.0
-[37b6cedf-1f77-55f8-9503-c64b63398394] Traceur 0.3.0
-[39424ebd-4cf3-5550-a685-96706a953f40] TreeView 0.3.1
-[b8865327-cd53-5732-bb35-84acbb429228] UnicodePlots 1.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[2a06ce6d-1589-592b-9c33-f37faeaed826] UnitfulPlots 0.0.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[0f1e0344-ec1d-5b48-a673-e5cf874b6c29] WebIO 0.7.0
-[9abbd945-dff8-562f-b5e8-e1ebf5ef1b79] Profile
-[2f01184e-e22b-5df5-ae63-d93ebab69eaf] SparseArrays
-
- - - -
- - - -
-
-
- - diff --git a/html/models/kepler_problem.html b/html/models/kepler_problem.html deleted file mode 100644 index 25456a73..00000000 --- a/html/models/kepler_problem.html +++ /dev/null @@ -1,946 +0,0 @@ - - - - - - Kepler Problem - - - - - - - - - - - - - - - - - -
-
-
- -
-

Kepler Problem

-
Yingbo Ma, Chris Rackauckas
- -
- -

The Hamiltonian $\mathcal {H}$ and the angular momentum $L$ for the Kepler problem are

-

\[ -\mathcal {H} = \frac{1}{2}(\dot{q}^2_1+\dot{q}^2_2)-\frac{1}{\sqrt{q^2_1+q^2_2}},\quad -L = q_1\dot{q_2} - \dot{q_1}q_2 -\]

-

Also, we know that

-

\[ -{\displaystyle {\frac {\mathrm {d} {\boldsymbol {p}}}{\mathrm {d} t}}=-{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {q}}}}\quad ,\quad {\frac {\mathrm {d} {\boldsymbol {q}}}{\mathrm {d} t}}=+{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {p}}}}} -\]

- - -
-using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr()
-H(q,p) = norm(p)^2/2 - inv(norm(q))
-L(q,p) = q[1]*p[2] - p[1]*q[2]
-
-pdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q)
-qdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p)
-
-initial_position = [.4, 0]
-initial_velocity = [0., 2.]
-initial_cond = (initial_position, initial_velocity)
-initial_first_integrals = (H(initial_cond...), L(initial_cond...))
-tspan = (0,20.)
-prob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan)
-sol = solve(prob, KahanLi6(), dt=1//10);
-
- - - -

Let's plot the orbit and check the energy and angular momentum variation. We know that energy and angular momentum should be constant, and they are also called first integrals.

- - -
-plot_orbit(sol) = plot(sol,vars=(3,4), lab="Orbit", title="Kepler Problem Solution")
-
-function plot_first_integrals(sol, H, L)
-    plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab="Energy variation", title="First Integrals")
-    plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab="Angular momentum variation")
-end
-analysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L))
-
- - -
-analysis_plot (generic function with 1 method)
-
- - - -
-analysis_plot(sol, H, L)
-
- - - - -

Let's try to use a Runge-Kutta-Nyström solver to solve this problem and check the first integrals' variation.

- - -
-sol2 = solve(prob, DPRKN6())  # dt is not necessary, because unlike symplectic
-                              # integrators DPRKN6 is adaptive
-@show sol2.u |> length
-
- - -
-sol2.u |> length = 79
-
- - - -
-analysis_plot(sol2, H, L)
-
- - - - -

Let's then try to solve the same problem by the ERKN4 solver, which is specialized for sinusoid-like periodic function

- - -
-sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic
-                            # integrators ERKN4 is adaptive
-@show sol3.u |> length
-
- - -
-sol3.u |> length = 52
-
- - - -
-analysis_plot(sol3, H, L)
-
- - - - -

We can see that ERKN4 does a bad job for this problem, because this problem is not sinusoid-like.

-

One advantage of using DynamicalODEProblem is that it can implicitly convert the second order ODE problem to a normal system of first order ODEs, which is solvable for other ODE solvers. Let's use the Tsit5 solver for the next example.

- - -
-sol4 = solve(prob, Tsit5())
-@show sol4.u |> length
-
- - -
-sol4.u |> length = 54
-
- - - -
-analysis_plot(sol4, H, L)
-
- - - - -

Note

-

There is drifting for all the solutions, and high order methods are drifting less because they are more accurate.

-

Conclusion

-
-

Symplectic integrator does not conserve the energy completely at all time, but the energy can come back. In order to make sure that the energy fluctuation comes back eventually, symplectic integrator has to have a fixed time step. Despite the energy variation, symplectic integrator conserves the angular momentum perfectly.

-

Both Runge-Kutta-Nyström and Runge-Kutta integrator do not conserve energy nor the angular momentum, and the first integrals do not tend to come back. An advantage Runge-Kutta-Nyström integrator over symplectic integrator is that RKN integrator can have adaptivity. An advantage Runge-Kutta-Nyström integrator over Runge-Kutta integrator is that RKN integrator has less function evaluation per step. The ERKN4 solver works best for sinusoid-like solutions.

-

Manifold Projection

-

In this example, we know that energy and angular momentum should be conserved. We can achieve this through mainfold projection. As the name implies, it is a procedure to project the ODE solution to a manifold. Let's start with a base case, where mainfold projection isn't being used.

- - -
-using DiffEqCallbacks
-
-plot_orbit2(sol) = plot(sol,vars=(1,2), lab="Orbit", title="Kepler Problem Solution")
-
-function plot_first_integrals2(sol, H, L)
-    plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab="Energy variation", title="First Integrals")
-    plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab="Angular momentum variation")
-end
-
-analysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L))
-
-function hamiltonian(du,u,params,t)
-    q, p = u[1:2], u[3:4]
-    qdot(@view(du[1:2]), p, q, params, t)
-    pdot(@view(du[3:4]), p, q, params, t)
-end
-
-prob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan)
-sol_ = solve(prob2, RK4(), dt=1//5, adaptive=false)
-analysis_plot2(sol_, H, L)
-
- - - - -

There is a significant fluctuation in the first integrals, when there is no mainfold projection.

- - -
-function first_integrals_manifold(residual,u)
-    residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])
-    residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4])
-end
-
-cb = ManifoldProjection(first_integrals_manifold)
-sol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb)
-analysis_plot2(sol5, H, L)
-
- - - - -

We can see that thanks to the manifold projection, the first integrals' variation is very small, although we are using RK4 which is not symplectic. But wait, what if we only project to the energy conservation manifold?

- - -
-function energy_manifold(residual,u)
-    residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])
-    residual[3:4] .= 0
-end
-energy_cb = ManifoldProjection(energy_manifold)
-sol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb)
-analysis_plot2(sol6, H, L)
-
- - - - -

There is almost no energy variation but angular momentum varies quite bit. How about only project to the angular momentum conservation manifold?

- - -
-function angular_manifold(residual,u)
-    residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4])
-    residual[3:4] .= 0
-end
-angular_cb = ManifoldProjection(angular_manifold)
-sol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb)
-analysis_plot2(sol7, H, L)
-
- - - - -

Again, we see what we expect.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","kepler_problem.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/outer_solar_system.html b/html/models/outer_solar_system.html deleted file mode 100644 index 2711cbfc..00000000 --- a/html/models/outer_solar_system.html +++ /dev/null @@ -1,812 +0,0 @@ - - - - - - The Outer Solar System - - - - - - - - - - - - - - - - - -
-
-
- -
-

The Outer Solar System

-
Yingbo Ma, Chris Rackauckas
- -
- -

Data

-

The chosen units are: masses relative to the sun, so that the sun has mass $1$. We have taken $m_0 = 1.00000597682$ to take account of the inner planets. Distances are in astronomical units , times in earth days, and the gravitational constant is thus $G = 2.95912208286 \cdot 10^{−4}$.

-Markdown.Table(Array{Any,1}[[Any["planet"], Any["mass"], Any["initial position"], Any["initial velocity"]], [Any["Jupiter"], Any[$m_1 = 0.000954786104043$], Any["<ul><li>−3.5023653</li><li>−3.8169847</li><li>−1.5507963</li></ul>"], Any["<ul><li>0.00565429</li><li>−0.00412490</li><li>−0.00190589</li></ul>"]], [Any["Saturn"], Any[$m_2 = 0.000285583733151$], Any["<ul><li>9.0755314</li><li>−3.0458353</li><li>−1.6483708</li></ul>"], Any["<ul><li>0.00168318</li><li>0.00483525</li><li>0.00192462</li></ul>"]], [Any["Uranus"], Any[$m_3 = 0.0000437273164546$], Any["<ul><li>8.3101420</li><li>−16.2901086</li><li>−7.2521278</li></ul>"], Any["<ul><li>0.00354178</li><li>0.00137102</li><li>0.00055029</li></ul>"]], [Any["Neptune"], Any[$m_4 = 0.0000517759138449$], Any["<ul><li>11.4707666</li><li>−25.7294829</li><li>−10.8169456</li></ul>"], Any["<ul><li>0.00288930</li><li>0.00114527</li><li>0.00039677</li></ul>"]], [Any["Pluto"], Any["\$ m_5 = 1/(1.3 \\cdot 10^8 )\$"], Any["<ul><li>−15.5387357</li><li>−25.2225594</li><li>−3.1902382</li></ul>"], Any["<ul><li>0.00276725</li><li>−0.00170702</li><li>−0.00136504</li></ul>"]]], Symbol[:r, :r, :r, :r]) -

The data is taken from the book "Geometric Numerical Integration" by E. Hairer, C. Lubich and G. Wanner.

- - -
-using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools
-gr()
-
-G = 2.95912208286e-4
-M = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8]
-planets = ["Sun", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"]
-
-pos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357]
-pos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594]
-pos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382]
-pos = ArrayPartition(pos_x,pos_y,pos_z)
-
-vel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725]
-vel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702]
-vel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504]
-vel = ArrayPartition(vel_x,vel_y,vel_z)
-
-tspan = (0.,200_000)
-
- - -
-(0.0, 200000)
-
- - -

The N-body problem's Hamiltonian is

-

\[ -H(p,q) = \frac{1}{2}\sum_{i=0}^{N}\frac{p_{i}^{T}p_{i}}{m_{i}} - G\sum_{i=1}^{N}\sum_{j=0}^{i-1}\frac{m_{i}m_{j}}{\left\lVert q_{i}-q_{j} \right\rVert} -\]

-

Here, we want to solve for the motion of the five outer planets relative to the sun, namely, Jupiter, Saturn, Uranus, Neptune and Pluto.

- - -
-const  = sum
-const N = 6
-potential(p, t, x, y, z, M) = -G*(i->(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N)
-
- - -
-potential (generic function with 1 method)
-
- - -

Hamiltonian System

-

NBodyProblem constructs a second order ODE problem under the hood. We know that a Hamiltonian system has the form of

-

\[ -\dot{p} = -H_{q}(p,q)\quad \dot{q}=H_{p}(p,q) -\]

-

For an N-body system, we can symplify this as:

-

\[ -\dot{p} = -\nabla{V}(q)\quad \dot{q}=M^{-1}p. -\]

-

Thus $\dot{q}$ is defined by the masses. We only need to define $\dot{p}$, and this is done internally by taking the gradient of $V$. Therefore, we only need to pass the potential function and the rest is taken care of.

- - -
-nprob = NBodyProblem(potential, M, pos, vel, tspan)
-sol = solve(nprob,Yoshida6(), dt=100);
-
- - - - -
-orbitplot(sol,body_names=planets)
-
- - - - - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","outer_solar_system.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/ode_extras/feagin.html b/html/ode_extras/feagin.html deleted file mode 100644 index e04e61fd..00000000 --- a/html/ode_extras/feagin.html +++ /dev/null @@ -1,899 +0,0 @@ - - - - - - Feagin's Order 10, 12, and 14 Methods - - - - - - - - - - - - - - - - - -
-
-
- -
-

Feagin's Order 10, 12, and 14 Methods

-
Chris Rackauckas
- -
- -

DifferentialEquations.jl includes Feagin's explicit Runge-Kutta methods of orders 10/8, 12/10, and 14/12. These methods have such high order that it's pretty much required that one uses numbers with more precision than Float64. As a prerequisite reference on how to use arbitrary number systems (including higher precision) in the numerical solvers, please see the Solving Equations in With Chosen Number Types notebook.

-

Investigation of the Method's Error

-

We can use Feagin's order 16 method as follows. Let's use a two-dimensional linear ODE. Like in the Solving Equations in With Chosen Number Types notebook, we change the initial condition to BigFloats to tell the solver to use BigFloat types.

- - -
-using DifferentialEquations
-const linear_bigα = big(1.01)
-f(u,p,t) = (linear_bigα*u)
-
-# Add analytical solution so that errors are checked
-f_analytic(u0,p,t) = u0*exp(linear_bigα*t)
-ff = ODEFunction(f,analytic=f_analytic)
-prob = ODEProblem(ff,big(0.5),(0.0,1.0))
-sol = solve(prob,Feagin14(),dt=1//16,adaptive=false);
-
- - - - -
-println(sol.errors)
-
- - -
-Dict(:l∞=>2.19751e-23,:final=>2.19751e-23,:l2=>1.0615e-23)
-
- - -

Compare that to machine $\epsilon$ for Float64:

- - -
-eps(Float64)
-
- - -
-2.220446049250313e-16
-
- - -

The error for Feagin's method when the stepsize is 1/16 is 8 orders of magnitude below machine $\epsilon$! However, that is dependent on the stepsize. If we instead use adaptive timestepping with the default tolerances, we get

- - -
-sol =solve(prob,Feagin14());
-println(sol.errors); print("The length was $(length(sol))")
-
- - -
-Dict(:l∞=>1.54574e-09,:final=>1.54574e-09,:l2=>8.92507e-10)
-The length was 3
-
- - -

Notice that when the stepsize is much higher, the error goes up quickly as well. These super high order methods are best when used to gain really accurate approximations (using still modest timesteps). Some examples of where such precision is necessary is astrodynamics where the many-body problem is highly chaotic and thus sensitive to small errors.

-

Convergence Test

-

The Order 14 method is awesome, but we need to make sure it's really that awesome. The following convergence test is used in the package tests in order to make sure the implementation is correct. Note that all methods have such tests in place.

- - -
-using DiffEqDevTools
-dts = 1.0 ./ 2.0 .^(10:-1:4)
-sim = test_convergence(dts,prob,Feagin14())
-
- - -
-DiffEqDevTools.ConvergenceSimulation{DiffEqBase.ODESolution{BigFloat,1,Arra
-y{BigFloat,1},Array{BigFloat,1},Dict{Symbol,BigFloat},Array{Float64,1},Arra
-y{Array{BigFloat,1},1},DiffEqBase.ODEProblem{BigFloat,Tuple{Float64,Float64
-},false,Nothing,DiffEqBase.ODEFunction{false,typeof(Main.WeaveSandBox20.f),
-LinearAlgebra.UniformScaling{Bool},typeof(Main.WeaveSandBox20.f_analytic),N
-othing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing},Nothing,DiffEqBase.
-StandardODEProblem},OrdinaryDiffEq.Feagin14,OrdinaryDiffEq.InterpolationDat
-a{DiffEqBase.ODEFunction{false,typeof(Main.WeaveSandBox20.f),LinearAlgebra.
-UniformScaling{Bool},typeof(Main.WeaveSandBox20.f_analytic),Nothing,Nothing
-,Nothing,Nothing,Nothing,Nothing,Nothing},Array{BigFloat,1},Array{Float64,1
-},Array{Array{BigFloat,1},1},OrdinaryDiffEq.Feagin14ConstantCache{BigFloat,
-Float64}}}}(DiffEqBase.ODESolution{BigFloat,1,Array{BigFloat,1},Array{BigFl
-oat,1},Dict{Symbol,BigFloat},Array{Float64,1},Array{Array{BigFloat,1},1},Di
-ffEqBase.ODEProblem{BigFloat,Tuple{Float64,Float64},false,Nothing,DiffEqBas
-e.ODEFunction{false,typeof(Main.WeaveSandBox20.f),LinearAlgebra.UniformScal
-ing{Bool},typeof(Main.WeaveSandBox20.f_analytic),Nothing,Nothing,Nothing,No
-thing,Nothing,Nothing,Nothing},Nothing,DiffEqBase.StandardODEProblem},Ordin
-aryDiffEq.Feagin14,OrdinaryDiffEq.InterpolationData{DiffEqBase.ODEFunction{
-false,typeof(Main.WeaveSandBox20.f),LinearAlgebra.UniformScaling{Bool},type
-of(Main.WeaveSandBox20.f_analytic),Nothing,Nothing,Nothing,Nothing,Nothing,
-Nothing,Nothing},Array{BigFloat,1},Array{Float64,1},Array{Array{BigFloat,1}
-,1},OrdinaryDiffEq.Feagin14ConstantCache{BigFloat,Float64}}}[retcode: Succe
-ss
-Interpolation: 3rd order Hermite
-t: [0.0, 0.000976563, 0.00195313, 0.00292969, 0.00390625, 0.00488281, 0.005
-85938, 0.00683594, 0.0078125, 0.00878906  …  0.991211, 0.992188, 0.993164, 
-0.994141, 0.995117, 0.996094, 0.99707, 0.998047, 0.999023, 1.0]
-u: BigFloat[0.50, 0.500493, 0.500987, 0.501482, 0.501977, 0.502472, 0.50296
-8, 0.503464, 0.503961, 0.504458  …  1.36067, 1.36201, 1.36335, 1.3647, 1.36
-605, 1.3674, 1.36874, 1.3701, 1.37145, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.00195313, 0.00390625, 0.00585938, 0.0078125, 0.00976563, 0.01171
-88, 0.0136719, 0.015625, 0.0175781  …  0.982422, 0.984375, 0.986328, 0.9882
-81, 0.990234, 0.992188, 0.994141, 0.996094, 0.998047, 1.0]
-u: BigFloat[0.50, 0.500987, 0.501977, 0.502968, 0.503961, 0.504956, 0.50595
-3, 0.506952, 0.507953, 0.508956  …  1.34864, 1.35131, 1.35397, 1.35665, 1.3
-5933, 1.36201, 1.3647, 1.3674, 1.3701, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.00390625, 0.0078125, 0.0117188, 0.015625, 0.0195313, 0.0234375, 
-0.0273438, 0.03125, 0.0351563  …  0.964844, 0.96875, 0.972656, 0.976563, 0.
-980469, 0.984375, 0.988281, 0.992188, 0.996094, 1.0]
-u: BigFloat[0.50, 0.501977, 0.503961, 0.505953, 0.507953, 0.509961, 0.51197
-7, 0.514001, 0.516033, 0.518073  …  1.32491, 1.33015, 1.33541, 1.34069, 1.3
-4599, 1.35131, 1.35665, 1.36201, 1.3674, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.0078125, 0.015625, 0.0234375, 0.03125, 0.0390625, 0.046875, 0.05
-46875, 0.0625, 0.0703125  …  0.929688, 0.9375, 0.945313, 0.953125, 0.960938
-, 0.96875, 0.976563, 0.984375, 0.992188, 1.0]
-u: BigFloat[0.50, 0.503961, 0.507953, 0.511977, 0.516033, 0.520121, 0.52424
-1, 0.528394, 0.53258, 0.536799  …  1.27869, 1.28882, 1.29903, 1.30932, 1.31
-969, 1.33015, 1.34069, 1.35131, 1.36201, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.015625, 0.03125, 0.046875, 0.0625, 0.078125, 0.09375, 0.109375, 
-0.125, 0.140625  …  0.859375, 0.875, 0.890625, 0.90625, 0.921875, 0.9375, 0
-.953125, 0.96875, 0.984375, 1.0]
-u: BigFloat[0.50, 0.507953, 0.516033, 0.524241, 0.53258, 0.541051, 0.549658
-, 0.558401, 0.567283, 0.576306  …  1.19103, 1.20998, 1.22923, 1.24878, 1.26
-864, 1.28882, 1.30932, 1.33015, 1.35131, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.03125, 0.0625, 0.09375, 0.125, 0.15625, 0.1875, 0.21875, 0.25, 0
-.28125  …  0.71875, 0.75, 0.78125, 0.8125, 0.84375, 0.875, 0.90625, 0.9375,
- 0.96875, 1.0]
-u: BigFloat[0.50, 0.516033, 0.53258, 0.549658, 0.567283, 0.585473, 0.604247
-, 0.623623, 0.64362, 0.664258  …  1.03333, 1.06647, 1.10067, 1.13596, 1.172
-39, 1.20998, 1.24878, 1.28882, 1.33015, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.0625, 0.125, 0.1875, 0.25, 0.3125, 0.375, 0.4375, 0.5, 0.5625, 0
-.625, 0.6875, 0.75, 0.8125, 0.875, 0.9375, 1.0]
-u: BigFloat[0.50, 0.53258, 0.567283, 0.604247, 0.64362, 0.685558, 0.730229,
- 0.777811, 0.828493, 0.882477, 0.93998, 1.00123, 1.06647, 1.13596, 1.20998,
- 1.28882, 1.3728]], Dict{Any,Any}(:l∞=>BigFloat[3.35435e-49, 5.07978e-45, 6
-.96505e-41, 6.99856e-37, 2.7616e-33, 4.96506e-28, 2.19751e-23],:final=>BigF
-loat[3.35435e-49, 5.07978e-45, 6.96505e-41, 6.99856e-37, 2.7616e-33, 4.9650
-6e-28, 2.19751e-23],:l2=>BigFloat[1.55766e-49, 2.36041e-45, 3.24061e-41, 3.
-26457e-37, 1.29478e-33, 2.35149e-28, 1.0615e-23]), 7, Dict(:dts=>[0.0009765
-63, 0.00195313, 0.00390625, 0.0078125, 0.015625, 0.03125, 0.0625]), Dict{An
-y,Any}(:l∞=>14.2933,:final=>14.2933,:l2=>14.3028), [0.000976563, 0.00195313
-, 0.00390625, 0.0078125, 0.015625, 0.03125, 0.0625])
-
- - -

For a view of what's going on, let's plot the simulation results.

- - -
-using Plots
-gr()
-plot(sim)
-
- - - - -

This is a clear trend indicating that the convergence is truly Order 14, which is the estimated slope.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("ode_extras","feagin.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/ode_extras/monte_carlo_parameter_estim.html b/html/ode_extras/monte_carlo_parameter_estim.html deleted file mode 100644 index cc2c3f22..00000000 --- a/html/ode_extras/monte_carlo_parameter_estim.html +++ /dev/null @@ -1,1093 +0,0 @@ - - - - - - Monte Carlo Parameter Estimation From Data - - - - - - - - - - - - - - - - - -
-
-
- -
-

Monte Carlo Parameter Estimation From Data

-
Chris Rackauckas
- -
- -

First you want to create a problem which solves multiple problems at the same time. This is the Monte Carlo Problem. When the parameter estimation tools say it will take any DEProblem, it really means ANY DEProblem!

-

So, let's get a Monte Carlo problem setup that solves with 10 different initial conditions.

- - -
-using DifferentialEquations, DiffEqParamEstim, Plots, Optim
-
-# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions
-
-# Set up Lotka-Volterra system
-function pf_func(du,u,p,t)
-  du[1] = p[1] * u[1] - p[2] * u[1]*u[2]
-  du[2] = -3 * u[2] + u[1]*u[2]
-end
-p = [1.5,1.0]
-prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [1.0, 1.0]
-
- - -

Now for a MonteCarloProblem we have to take this problem and tell it what to do N times via the prob_func. So let's generate N=10 different initial conditions, and tell it to run the same problem but with these 10 different initial conditions each time:

- - -
-# Setting up to solve the problem N times (for the N different initial conditions)
-N = 10;
-initial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]]
-function prob_func(prob,i,repeat)
-  ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)
-end
-monte_prob = MonteCarloProblem(prob,prob_func=prob_func)
-
- - -
-MonteCarloProblem with problem ODEProblem
-
- - -

We can check this does what we want by solving it:

- - -
-# Check above does what we want
-sim = solve(monte_prob,Tsit5(),num_monte=N)
-plot(sim)
-
- - - - -

nummonte=N means "run N times", and each time it runs the problem returned by the probfunc, which is always the same problem but with the ith initial condition.

-

Now let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array.

- - -
-# Generate a dataset from these runs
-data_times = 0.0:0.1:10.0
-sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)
-data = Array(sim)
-
- - -
-2×101×10 Array{Float64,3}:
-[:, :, 1] =
- 1.0  1.06108   1.14403   1.24917   1.37764   …  0.956979  0.983561  1.0337
-6 
- 1.0  0.821084  0.679053  0.566893  0.478813     1.35559   1.10629   0.9063
-71
-
-[:, :, 2] =
- 1.0  1.01413  1.05394  1.11711   …  1.05324  1.01309  1.00811  1.03162
- 1.5  1.22868  1.00919  0.833191     2.08023  1.70818  1.39973  1.14803
-
-[:, :, 3] =
- 1.5  1.58801   1.70188   1.84193   2.00901   …  2.0153    2.21084   2.4358
-9 
- 1.0  0.864317  0.754624  0.667265  0.599149     0.600942  0.549793  0.5136
-79
-
-[:, :, 4] =
- 1.5  1.51612  1.5621   1.63555   1.73531   …  1.83823   1.98545   2.15958 
- 1.5  1.29176  1.11592  0.969809  0.850159     0.771089  0.691421  0.630025
-
-[:, :, 5] =
- 0.5  0.531705  0.576474  0.634384  0.706139  …  9.05366   9.4006   8.83911
- 1.0  0.77995   0.610654  0.480565  0.380645     0.809382  1.51708  2.82619
-
-[:, :, 6] =
- 1.0  1.11027   1.24238   1.39866   1.58195   …  0.753108  0.748814  0.7682
-84
- 0.5  0.411557  0.342883  0.289812  0.249142     1.73879   1.38829   1.1093
-2 
-
-[:, :, 7] =
- 0.5  0.555757  0.623692  0.705084  0.80158   …  8.11216   9.10671   9.9217
- 
- 0.5  0.390449  0.30679   0.24286   0.193966     0.261298  0.455937  0.8788
-1
-
-[:, :, 8] =
- 2.0  2.11239   2.24921   2.41003   2.59433   …  3.223     3.47362   3.7301
-4 
- 1.0  0.909749  0.838025  0.783532  0.745339     0.739471  0.765597  0.8130
-86
-
-[:, :, 9] =
- 1.0  0.969326  0.971358  1.00017  …  1.25065  1.1012   1.01733  0.979306
- 2.0  1.63445   1.33389   1.09031     3.02671  2.52063  2.07502  1.69807 
-
-[:, :, 10] =
- 2.0  1.92148  1.88215  1.87711  1.90264  …  2.15079   2.27938   2.43105
- 2.0  1.80195  1.61405  1.4426   1.2907      0.957221  0.884827  0.82948
-
- - -

Here, data[i,j,k] is the same as sim[i,j,k] which is the same as sim[k]i,j. So data[i,j,k] is the jth timepoint of the ith variable in the kth trajectory.

-

Now let's build a loss function. A loss function is some loss(sol) that spits out a scalar for how far from optimal we are. In the documentation I show that we normally do loss = L2Loss(t,data), but we can bootstrap off of this. Instead lets build an array of N loss functions, each one with the correct piece of data.

- - -
-# Building a loss function
-losses = [L2Loss(data_times,data[:,:,i]) for i in 1:N]
-
- - -
-10-element Array{DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePre
-cision{Float64},Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Noth
-ing,Nothing},1}:
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 1.06108 … 0.983561 1.03376; 1.0 0.821084 … 1.10629 0.906371
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 1.01413 … 1.00811 1.03162; 1.5 1.22868 … 1.39973 1.14803], 
-nothing, nothing, nothing, nothing)   
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.5 1.58801 … 2.21084 2.43589; 1.0 0.864317 … 0.549793 0.513679
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.5 1.51612 … 1.98545 2.15958; 1.5 1.29176 … 0.691421 0.630025]
-, nothing, nothing, nothing, nothing) 
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [0.5 0.531705 … 9.4006 8.83911; 1.0 0.77995 … 1.51708 2.82619], 
-nothing, nothing, nothing, nothing)   
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 1.11027 … 0.748814 0.768284; 0.5 0.411557 … 1.38829 1.10932
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [0.5 0.555757 … 9.10671 9.9217; 0.5 0.390449 … 0.455937 0.87881]
-, nothing, nothing, nothing, nothing) 
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [2.0 2.11239 … 3.47362 3.73014; 1.0 0.909749 … 0.765597 0.813086
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 0.969326 … 1.01733 0.979306; 2.0 1.63445 … 2.07502 1.69807]
-, nothing, nothing, nothing, nothing) 
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [2.0 1.92148 … 2.27938 2.43105; 2.0 1.80195 … 0.884827 0.82948],
- nothing, nothing, nothing, nothing)
-
- - -

So losses[i] is a function which computes the loss of a solution against the data of the ith trajectory. So to build our true loss function, we sum the losses:

- - -
-loss(sim) = sum(losses[i](sim[i]) for i in 1:N)
-
- - -
-loss (generic function with 1 method)
-
- - -

As a double check, make sure that loss(sim) outputs zero (since we generated the data from sim). Now we generate data with other parameters:

- - -
-prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8])
-function prob_func(prob,i,repeat)
-  ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)
-end
-monte_prob = MonteCarloProblem(prob,prob_func=prob_func)
-sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)
-loss(sim)
-
- - -
-10108.695792044306
-
- - -

and get a non-zero loss. So we now have our problem, our data, and our loss function... we have what we need.

-

Put this into buildlossobjective.

- - -
-obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,
-                           saveat=data_times)
-
- - -
-(::DiffEqParamEstim.DiffEqObjective{getfield(DiffEqParamEstim, Symbol("##29
-#34")){Nothing,Bool,Int64,typeof(DiffEqParamEstim.STANDARD_PROB_GENERATOR),
-Base.Iterators.Pairs{Symbol,Any,Tuple{Symbol,Symbol},NamedTuple{(:num_monte
-, :saveat),Tuple{Int64,StepRangeLen{Float64,Base.TwicePrecision{Float64},Ba
-se.TwicePrecision{Float64}}}}},DiffEqBase.MonteCarloProblem{DiffEqBase.ODEP
-roblem{Array{Float64,1},Tuple{Float64,Float64},true,Array{Float64,1},DiffEq
-Base.ODEFunction{true,typeof(Main.WeaveSandBox22.pf_func),LinearAlgebra.Uni
-formScaling{Bool},Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,N
-othing},Nothing,DiffEqBase.StandardODEProblem},typeof(Main.WeaveSandBox22.p
-rob_func),getfield(DiffEqBase, Symbol("##378#384")),getfield(DiffEqBase, Sy
-mbol("##380#386")),Array{Any,1}},OrdinaryDiffEq.Tsit5,typeof(Main.WeaveSand
-Box22.loss),Nothing},getfield(DiffEqParamEstim, Symbol("##33#39"))}) (gener
-ic function with 2 methods)
-
- - -

Notice that I added the kwargs for solve into this. They get passed to an internal solve command, so then the loss is computed on N trajectories at data_times.

-

Thus we take this objective function over to any optimization package. I like to do quick things in Optim.jl. Here, since the Lotka-Volterra equation requires positive parameters, I use Fminbox to make sure the parameters stay positive. I start the optimization with [1.3,0.9], and Optim spits out that the true parameters are:

- - -
-lower = zeros(2)
-upper = fill(2.0,2)
-result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.5000000000573428,1.0000000001610496]
- * Minimum: 7.028970e-16
- * Iterations: 4
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.06e-06 
-   * Stopped by an increasing objective: true
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 195
- * Gradient Calls: 195
-
- - - -
-result
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.5000000000573428,1.0000000001610496]
- * Minimum: 7.028970e-16
- * Iterations: 4
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.06e-06 
-   * Stopped by an increasing objective: true
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 195
- * Gradient Calls: 195
-
- - -

Optim finds one but not the other parameter.

-

I would run a test on synthetic data for your problem before using it on real data. Maybe play around with different optimization packages, or add regularization. You may also want to decrease the tolerance of the ODE solvers via

- - -
-obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,
-                           abstol=1e-8,reltol=1e-8,
-                           saveat=data_times)
-result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.5007434761923657,1.001238477498098]
- * Minimum: 4.163900e-02
- * Iterations: 5
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.66e-06 
-   * Stopped by an increasing objective: true
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 227
- * Gradient Calls: 227
-
- - - -
-result
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.5007434761923657,1.001238477498098]
- * Minimum: 4.163900e-02
- * Iterations: 5
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.66e-06 
-   * Stopped by an increasing objective: true
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 227
- * Gradient Calls: 227
-
- - -

if you suspect error is the problem. However, if you're having problems it's most likely not the ODE solver tolerance and mostly because parameter inference is a very hard optimization problem.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("ode_extras","monte_carlo_parameter_estim.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/ode_extras/ode_minmax.html b/html/ode_extras/ode_minmax.html deleted file mode 100644 index ed51ee78..00000000 --- a/html/ode_extras/ode_minmax.html +++ /dev/null @@ -1,1021 +0,0 @@ - - - - - - Finding Maxima and Minima of DiffEq Solutions - - - - - - - - - - - - - - - - - -
-
-
- -
-

Finding Maxima and Minima of DiffEq Solutions

-
Chris Rackauckas
- -
- -

Setup

-

In this tutorial we will show how to use Optim.jl to find the maxima and minima of solutions. Let's take a look at the double pendulum:

- - -
-#Constants and setup
-using OrdinaryDiffEq
-initial = [0.01, 0.01, 0.01, 0.01]
-tspan = (0.,100.)
-
-#Define the problem
-function double_pendulum_hamiltonian(udot,u,p,t)
-    α  = u[1]
-     = u[2]
-    β  = u[3]
-     = u[4]
-    udot .=
-    [2(-(1+cos(β)))/(3-cos(2β)),
-    -2sin(α) - sin(α+β),
-    2(-(1+cos(β)) + (3+2cos(β)))/(3-cos(2β)),
-    -sin(α+β) - 2sin(β)*(((-))/(3-cos(2β))) + 2sin(2β)*((^2 - 2(1+cos(β))* + (3+2cos(β))^2)/(3-cos(2β))^2)]
-end
-
-#Pass to solvers
-poincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100.0)
-u0: [0.01, 0.01, 0.01, 0.01]
-
- - - -
-sol = solve(poincare, Tsit5())
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: 193-element Array{Float64,1}:
-   0.0                
-   0.08332584852065579
-   0.24175280272193683
-   0.438953650048967  
-   0.679732254249109  
-   0.9647633763375199 
-   1.317944955684634  
-   1.7031210236334697 
-   2.067847793204029  
-   2.4717825254408226 
-   ⋮                  
-  95.84571836675161   
-  96.35777612654947   
-  96.9291238553289    
-  97.44678729813481   
-  97.96247442963697   
-  98.5118249699588    
-  99.06081878698636   
-  99.58283477685136   
- 100.0                
-u: 193-element Array{Array{Float64,1},1}:
- [0.01, 0.01, 0.01, 0.01]                          
- [0.00917069, 0.006669, 0.0124205, 0.00826641]     
- [0.00767328, 0.000374625, 0.0164426, 0.00463683]  
- [0.00612597, -0.00730546, 0.0199674, -0.000336506]
- [0.0049661, -0.0163086, 0.0214407, -0.00670509]   
- [0.00479557, -0.0262381, 0.0188243, -0.0139134]   
- [0.00605469, -0.0371246, 0.0100556, -0.0210382]   
- [0.00790078, -0.046676, -0.00267353, -0.025183]   
- [0.00827652, -0.0527843, -0.0127315, -0.0252581]  
- [0.00552358, -0.0552525, -0.0168439, -0.021899]   
- ⋮                                                 
- [-0.0148868, 0.0423324, 0.0136282, 0.0180291]     
- [-0.00819054, 0.0544225, 0.00944831, 0.0177401]   
- [0.00412448, 0.0567489, -0.00515392, 0.017597]    
- [0.0130796, 0.0480772, -0.0137706, 0.0182866]     
- [0.0153161, 0.0316313, -0.00895722, 0.0171185]    
- [0.0111156, 0.00992938, 0.0072972, 0.0103535]     
- [0.00571392, -0.0117872, 0.020508, -0.00231029]   
- [0.00421143, -0.0299109, 0.0187506, -0.0156505]   
- [0.00574124, -0.0416539, 0.00741327, -0.023349]
-
- - -

In time, the solution looks like:

- - -
-using Plots; gr()
-plot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000)
-
- - - - -

while it has the well-known phase-space plot:

- - -
-plot(sol, vars=(3,4), leg=false)
-
- - - - -

Local Optimization

-

Let's fine out what some of the local maxima and minima are. Optim.jl can be used to minimize functions, and the solution type has a continuous interpolation which can be used. Let's look for the local optima for the 4th variable around t=20. Thus our optimization function is:

- - -
-f = (t) -> sol(t,idxs=4)
-
- - -
-#1 (generic function with 1 method)
-
- - -

first(t) is the same as t[1] which transforms the array of size 1 into a number. idxs=4 is the same as sol(first(t))[4] but does the calculation without a temporary array and thus is faster. To find a local minima, we can simply call Optim on this function. Let's find a local minimum:

- - -
-using Optim
-opt = optimize(f,18.0,22.0)
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Brent's Method
- * Search Interval: [18.000000, 22.000000]
- * Minimizer: 1.863213e+01
- * Minimum: -2.793164e-02
- * Iterations: 11
- * Convergence: max(|x - x_upper|, |x - x_lower|) <= 2*(1.5e-08*|x|+2.2e-16
-): true
- * Objective Function Calls: 12
-
- - -

From this printout we see that the minimum is at t=18.63 and the value is -2.79e-2. We can get these in code-form via:

- - -
-println(opt.minimizer)
-
- - -
-18.632126799604933
-
- - - -
-println(opt.minimum)
-
- - -
--0.027931635264245896
-
- - -

To get the maximum, we just minimize the negative of the function:

- - -
-f = (t) -> -sol(first(t),idxs=4)
-opt2 = optimize(f,0.0,22.0)
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Brent's Method
- * Search Interval: [0.000000, 22.000000]
- * Minimizer: 1.399975e+01
- * Minimum: -2.269411e-02
- * Iterations: 13
- * Convergence: max(|x - x_upper|, |x - x_lower|) <= 2*(1.5e-08*|x|+2.2e-16
-): true
- * Objective Function Calls: 14
-
- - -

Let's add the maxima and minima to the plots:

- - -
-plot(sol, vars=(0,4), plotdensity=10000)
-scatter!([opt.minimizer],[opt.minimum],label="Local Min")
-scatter!([opt2.minimizer],[-opt2.minimum],label="Local Max")
-
- - - - -

Brent's method will locally minimize over the full interval. If we instead want a local maxima nearest to a point, we can use BFGS(). In this case, we need to optimize a vector [t], and thus dereference it to a number using first(t).

- - -
-f = (t) -> -sol(first(t),idxs=4)
-opt = optimize(f,[20.0],BFGS())
-
- - -
-Results of Optimization Algorithm
- * Algorithm: BFGS
- * Starting Point: [20.0]
- * Minimizer: [23.29760728871635]
- * Minimum: -2.588588e-02
- * Iterations: 4
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: false 
-     |x - x'| = 1.11e-04 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: false
-     |f(x) - f(x')| = -6.49e-09 |f(x)|
-   * |g(x)| ≤ 1.0e-08: true 
-     |g(x)| = 8.42e-12 
-   * Stopped by an increasing objective: false
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 16
- * Gradient Calls: 16
-
- - -

Global Optimization

-

If we instead want to find global maxima and minima, we need to look somewhere else. For this there are many choices. A pure Julia option is BlackBoxOptim.jl, but I will use NLopt.jl. Following the NLopt.jl tutorial but replacing their function with out own:

- - -
-import NLopt, ForwardDiff
-
-count = 0 # keep track of # function evaluations
-
-function g(t::Vector, grad::Vector)
-  if length(grad) > 0
-    #use ForwardDiff for the gradients
-    grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t)
-  end
-  sol(first(t),idxs=4)
-end
-opt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1)
-NLopt.lower_bounds!(opt, [0.0])
-NLopt.upper_bounds!(opt, [40.0])
-NLopt.xtol_rel!(opt,1e-8)
-NLopt.min_objective!(opt, g)
-(minf,minx,ret) = NLopt.optimize(opt,[20.0])
-println(minf," ",minx," ",ret)
-
- - -
--0.027931635264245837 [18.6321] XTOL_REACHED
-
- - - -
-NLopt.max_objective!(opt, g)
-(maxf,maxx,ret) = NLopt.optimize(opt,[20.0])
-println(maxf," ",maxx," ",ret)
-
- - -
-0.027968571933041954 [6.5537] XTOL_REACHED
-
- - - -
-plot(sol, vars=(0,4), plotdensity=10000)
-scatter!([minx],[minf],label="Global Min")
-scatter!([maxx],[maxf],label="Global Max")
-
- - - - - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("ode_extras","ode_minmax.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/test.html b/html/test.html deleted file mode 100644 index ad839373..00000000 --- a/html/test.html +++ /dev/null @@ -1,743 +0,0 @@ - - - - - - Test - - - - - - - - - - - - - - - - - -
-
-
- -
-

Test

-
Chris Rackauckas
- -
- -

This is a test of the builder system.

- - -
-using DiffEqTutorials
-DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
-
- - - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file(".","test.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.6.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.14.0
-[2a06ce6d-1589-592b-9c33-f37faeaed826] UnitfulPlots 0.0.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.7.2
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/type_handling/number_types.html b/html/type_handling/number_types.html deleted file mode 100644 index f73f310c..00000000 --- a/html/type_handling/number_types.html +++ /dev/null @@ -1,955 +0,0 @@ - - - - - - Solving Equations in With Julia-Defined Types - - - - - - - - - - - - - - - - - -
-
-
- -
-

Solving Equations in With Julia-Defined Types

-
Chris Rackauckas
- -
- -

One of the nice things about DifferentialEquations.jl is that it is designed with Julia's type system in mind. What this means is, if you have properly defined a Number type, you can use this number type in DifferentialEquations.jl's algorithms! [Note that this is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as ODE.jl, Sundials.jl, and ODEInterface.jl are not compatible with some number systems.]

-

DifferentialEquations.jl determines the numbers to use in its solvers via the types that are designated by tspan and the initial condition of the problem. It will keep the time values in the same type as tspan, and the solution values in the same type as the initial condition. [Note that adaptive timestepping requires that the time type is compaible with sqrt and ^ functions. Thus dt cannot be Integer or numbers like that if adaptive timestepping is chosen].

-

Let's solve the linear ODE first define an easy way to get ODEProblems for the linear ODE:

- - -
-using DifferentialEquations
-f = (u,p,t) -> (p*u)
-prob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01);
-
- - - -

First let's solve it using Float64s. To do so, we just need to set u0 to a Float64 (which is done by the default) and dt should be a float as well.

- - -
-prob = prob_ode_linear
-sol =solve(prob,Tsit5())
-println(sol)
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: [0.0, 0.0996426, 0.345703, 0.677692, 1.0]
-u: [0.5, 0.552939, 0.708938, 0.99136, 1.3728]
-
- - -

Notice that both the times and the solutions were saved as Float64. Let's change the time to use rational values. Rationals are not compatible with adaptive time stepping since they do not have an L2 norm (this can be worked around by defining internalnorm, but rationals already explode in size!). To account for this, let's turn off adaptivity as well:

- - -
-prob = ODEProblem(f,1/2,(0//1,1//1),101//100);
-sol = solve(prob,RK4(),dt=1//2^(6),adaptive=false)
-println(sol)
-
- - -
-retcode: Success
-Interpolation: 3rd order Hermite
-t: Rational{Int64}[0//1, 1//64, 1//32, 3//64, 1//16, 5//64, 3//32, 7//64, 1
-//8, 9//64, 5//32, 11//64, 3//16, 13//64, 7//32, 15//64, 1//4, 17//64, 9//3
-2, 19//64, 5//16, 21//64, 11//32, 23//64, 3//8, 25//64, 13//32, 27//64, 7//
-16, 29//64, 15//32, 31//64, 1//2, 33//64, 17//32, 35//64, 9//16, 37//64, 19
-//32, 39//64, 5//8, 41//64, 21//32, 43//64, 11//16, 45//64, 23//32, 47//64,
- 3//4, 49//64, 25//32, 51//64, 13//16, 53//64, 27//32, 55//64, 7//8, 57//64
-, 29//32, 59//64, 15//16, 61//64, 31//32, 63//64, 1//1]
-u: [0.5, 0.507953, 0.516033, 0.524241, 0.53258, 0.541051, 0.549658, 0.55840
-1, 0.567283, 0.576306, 0.585473, 0.594786, 0.604247, 0.613858, 0.623623, 0.
-633542, 0.64362, 0.653857, 0.664258, 0.674824, 0.685558, 0.696463, 0.707541
-, 0.718795, 0.730229, 0.741844, 0.753644, 0.765632, 0.777811, 0.790183, 0.8
-02752, 0.815521, 0.828493, 0.841671, 0.855059, 0.86866, 0.882477, 0.896514,
- 0.910775, 0.925262, 0.93998, 0.954931, 0.970121, 0.985552, 1.00123, 1.0171
-5, 1.03333, 1.04977, 1.06647, 1.08343, 1.10067, 1.11817, 1.13596, 1.15403, 
-1.17239, 1.19103, 1.20998, 1.22923, 1.24878, 1.26864, 1.28882, 1.30932, 1.3
-3015, 1.35131, 1.3728]
-
- - -

Now let's do something fun. Let's change the solution to use Rational{BigInt} and print out the value at the end of the simulation. To do so, simply change the definition of the initial condition.

- - -
-prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100);
-sol =solve(prob,RK4(),dt=1//2^(6),adaptive=false)
-println(sol[end])
-
- - -
-415403291938655888343294424838034348376204408921988582429386196369066828013
-380062427154556444246064110042147806995712770513313913105317131993928991562
-472219540324173687134074558951938783349315387199475055050716642476760417033
-833225395963069751630544424879625010648869655282442577465289103178163815663
-464066572670655356269579471636764679863656649012559514171272038086748586891
-653145664881452891757769341753396504927956887980186316721217138912802907978
-839488971277351483679854338427632656105429434285170828205087679096886906512
-836058415177000071451519455149761416134211934766818795085616643778333812510
-724294609438512646808081849075509246961483574876752196687093709017376892988
-720208689912813268920171256693582145356856885176190731036088900945481923320
-301926151164642204512204346142796306783141982263276125756548530824427611816
-333393407861066935488564588880674178922907680658650707284447124975289884078
-283531881659241492248450685643985785207092880524994430296917090030308304496
-2139908567605824428891872081720287044135359380045755621121//302595526357001
-916401850227786985339805854374596312639728370747077589271270423243703004392
-074003302619884721642626495128918849830763359112247111187416392615737498981
-461087857422550657171300852094084580555857942985570738231419687525783564788
-285621871741725085612510228468354691202070954415518824737971685957295081128
-193794470230767667945336581432859330595785427486755359414346047520148998708
-472579747503225700773992946775819105236957926068135290787592745892648489231
-548275787132390564752450502531598102790376905344412549120000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-0000000000000000000000000000000000000000000
-
- - -

That's one huge fraction!

-

Other Compatible Number Types

-

BigFloats

- - -
-prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01))
-sol =solve(prob_ode_biglinear,Tsit5())
-println(sol[end])
-
- - -
-1.3728004409038087277892831823141155298533360144614213350145098661946611676
-11229
-
- - -

DoubleFloats.jl

-

There's are Float128-like types. Higher precision, but fixed and faster than arbitrary precision.

- - -
-using DoubleFloats
-prob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01))
-sol =solve(prob_ode_doublelinear,Tsit5())
-println(sol[end])
-
- - -
-1.3728004409038075
-
- - -

ArbFloats

-

These high precision numbers which are much faster than Bigs for less than 500-800 bits of accuracy.

- - -
-using ArbNumerics
-prob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01))
-sol =solve(prob_ode_arbfloatlinear,Tsit5())
-println(sol[end])
-
- - -
-1.372800440903808727789283182314
-
- - -

Incompatible Number Systems

-

DecFP.jl

-

Next let's try DecFP. DecFP is a fixed-precision decimals library which is made to give both performance but known decimals of accuracy. Having already installed DecFP with ]add DecFP, I can run the following:

- - -
-using DecFP
-prob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01))
-sol =solve(prob_ode_decfplinear,Tsit5())
-
- - -
-ERROR: StackOverflowError:
-
- - - -
-println(sol[end]); println(typeof(sol[end]))
-
- - -
-1.372800440903808727789283182314
-ArbNumerics.ArbFloat{128}
-
- - -

Decimals.jl

-

Install with ]add Decimals.

- - -
-using Decimals
-prob_ode_decimallinear = ODEProblem(f,[decimal("1.0")]./[decimal("2.0")],(0//1,1//1),decimal(1.01))
-sol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails
-
- - -
-ERROR: MethodError: Decimals.Decimal(::Rational{Int64}) is ambiguous. Candidates:
-  (::Type{T})(x::Rational{S}) where {S, T<:AbstractFloat} in Base at rational.jl:92
-  Decimals.Decimal(num::Real) in Decimals at C:\Users\accou\.julia\packages\Decimals\Qfcas\src\decimal.jl:13
-Possible fix, define
-  Decimals.Decimal(::Rational{S})
-
- - - -
-println(sol[end]); println(typeof(sol[end]))
-
- - -
-1.372800440903808727789283182314
-ArbNumerics.ArbFloat{128}
-
- - -

At the time of writing this, Decimals are not compatible. This is not on DifferentialEquations.jl's end, it's on partly on Decimal's end since it is not a subtype of Number. Thus it's not recommended you use Decimals with DifferentialEquations.jl

-

Conclusion

-

As you can see, DifferentialEquations.jl can use arbitrary Julia-defined number systems in its arithmetic. If you need 128-bit floats, i.e. a bit more precision but not arbitrary, DoubleFloats.jl is a very good choice! For arbitrary precision, ArbNumerics are the most feature-complete and give great performance compared to BigFloats, and thus I recommend their use when high-precision (less than 512-800 bits) is required. DecFP is a great library for high-performance decimal numbers and works well as well. Other number systems could use some modernization.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("type_handling","number_types.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/type_handling/uncertainties.html b/html/type_handling/uncertainties.html deleted file mode 100644 index 40498baf..00000000 --- a/html/type_handling/uncertainties.html +++ /dev/null @@ -1,972 +0,0 @@ - - - - - - Numbers with Uncertainties - - - - - - - - - - - - - - - - - -
-
-
- -
-

Numbers with Uncertainties

-
Mosè Giordano, Chris Rackauckas
- -
- -

The result of a measurement should be given as a number with an attached uncertainties, besides the physical unit, and all operations performed involving the result of the measurement should propagate the uncertainty, taking care of correlation between quantities.

-

There is a Julia package for dealing with numbers with uncertainties: Measurements.jl. Thanks to Julia's features, DifferentialEquations.jl easily works together with Measurements.jl out-of-the-box.

-

This notebook will cover some of the examples from the tutorial about classical Physics.

-

Caveat about Measurement type

-

Before going on with the tutorial, we must point up a subtlety of Measurements.jl that you should be aware of:

- - -
-using Measurements
-
-5.23 ± 0.14 === 5.23 ± 0.14
-
- - -
-false
-
- - - -
-(5.23± 0.14) - (5.23 ± 0.14)
-
- - -
-0.0 ± 0.2
-
- - - -
-(5.23 ± 0.14) / (5.23 ± 0.14)
-
- - -
-1.0 ± 0.038
-
- - -

The two numbers above, even though have the same nominal value and the same uncertainties, are actually two different measurements that only by chance share the same figures and their difference and their ratio have a non-zero uncertainty. It is common in physics to get very similar, or even equal, results for a repeated measurement, but the two measurements are not the same thing.

-

Instead, if you have one measurement and want to perform some operations involving it, you have to assign it to a variable:

- - -
-x = 5.23 ± 0.14
-x === x
-
- - -
-true
-
- - - -
-x - x
-
- - -
-0.0 ± 0.0
-
- - - -
-x / x
-
- - -
-1.0 ± 0.0
-
- - -

Radioactive Decay of Carbon-14

-

The rate of decay of carbon-14 is governed by a first order linear ordinary differential equation

-

\[ -\frac{\mathrm{d}u(t)}{\mathrm{d}t} = -\frac{u(t)}{\tau} -\]

-

where $\tau$ is the mean lifetime of carbon-14, which is related to the half-life $t_{1/2} = (5730 \pm 40)$ years by the relation $\tau = t_{1/2}/\ln(2)$.

- - -
-using DifferentialEquations, Measurements, Plots
-
-# Half-life and mean lifetime of radiocarbon, in years
-t_12 = 5730 ± 40
-τ = t_12 / log(2)
-
-#Setup
-u₀ = 1 ± 0
-tspan = (0.0, 10000.0)
-
-#Define the problem
-radioactivedecay(u,p,t) = - u / τ
-
-#Pass to solver
-prob = ODEProblem(radioactivedecay, u₀, tspan)
-sol = solve(prob, Tsit5(), reltol = 1e-8)
-
-# Analytic solution
-u = exp.(- sol.t / τ)
-
-plot(sol.t, sol.u, label = "Numerical", xlabel = "Years", ylabel = "Fraction of Carbon-14")
-plot!(sol.t, u, label = "Analytic")
-
- - - - -

The two curves are perfectly superimposed, indicating that the numerical solution matches the analytic one. We can check that also the uncertainties are correctly propagated in the numerical solution:

- - -
-println("Quantity of carbon-14 after ",  sol.t[11], " years:")
-
- - -
-Quantity of carbon-14 after 5207.5228514026385 years:
-
- - - -
-println("Numerical: ", sol[11])
-
- - -
-Numerical: 0.5326215661145899 ± 0.0023422116367677525
-
- - - -
-println("Analytic:  ", u[11])
-
- - -
-Analytic:  0.5326215654338256 ± 0.002342211664674973
-
- - -

Both the value of the numerical solution and its uncertainty match the analytic solution within the requested tolerance. We can also note that close to 5730 years after the beginning of the decay (half-life of the radioisotope), the fraction of carbon-14 that survived is about 0.5.

-

Simple pendulum

-

Small angles approximation

-

The next problem we are going to study is the simple pendulum in the approximation of small angles. We address this simplified case because there exists an easy analytic solution to compare.

-

The differential equation we want to solve is

-

\[ -\ddot{\theta} + \frac{g}{L} \theta = 0 -\]

-

where $g = (9.79 \pm 0.02)~\mathrm{m}/\mathrm{s}^2$ is the gravitational acceleration measured where the experiment is carried out, and $L = (1.00 \pm 0.01)~\mathrm{m}$ is the length of the pendulum.

-

When you set up the problem for DifferentialEquations.jl remember to define the measurements as variables, as seen above.

- - -
-using DifferentialEquations, Measurements, Plots
-
-g = 9.79 ± 0.02; # Gravitational constants
-L = 1.00 ± 0.01; # Length of the pendulum
-
-#Initial Conditions
-u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle
-tspan = (0.0, 6.3)
-
-#Define the problem
-function simplependulum(du,u,p,t)
-    θ  = u[1]
-     = u[2]
-    du[1] = 
-    du[2] = -(g/L)*θ
-end
-
-#Pass to solvers
-prob = ODEProblem(simplependulum, u₀, tspan)
-sol = solve(prob, Tsit5(), reltol = 1e-6)
-
-# Analytic solution
-u = u₀[2] .* cos.(sqrt(g / L) .* sol.t)
-
-plot(sol.t, getindex.(sol.u, 2), label = "Numerical")
-plot!(sol.t, u, label = "Analytic")
-
- - - - -

Also in this case there is a perfect superimposition between the two curves, including their uncertainties.

-

We can also have a look at the difference between the two solutions:

- - -
-plot(sol.t, getindex.(sol.u, 2) .- u, label = "")
-
- - - - -

Arbitrary amplitude

-

Now that we know how to solve differential equations involving numbers with uncertainties we can solve the simple pendulum problem without any approximation. This time the differential equation to solve is the following:

-

\[ -\ddot{\theta} + \frac{g}{L} \sin(\theta) = 0 -\]

- - -
-g = 9.79 ± 0.02; # Gravitational constants
-L = 1.00 ± 0.01; # Length of the pendulum
-
-#Initial Conditions
-u₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle
-tspan = (0.0, 6.3)
-
-#Define the problem
-function simplependulum(du,u,p,t)
-    θ  = u[1]
-     = u[2]
-    du[1] = 
-    du[2] = -(g/L) * sin(θ)
-end
-
-#Pass to solvers
-prob = ODEProblem(simplependulum, u₀, tspan)
-sol = solve(prob, Tsit5(), reltol = 1e-6)
-
-plot(sol.t, getindex.(sol.u, 2), label = "Numerical")
-
- - - - -

We note that in this case the period of the oscillations is not constant.

- - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("type_handling","uncertainties.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/type_handling/unitful.html b/html/type_handling/unitful.html deleted file mode 100644 index e48008de..00000000 --- a/html/type_handling/unitful.html +++ /dev/null @@ -1,874 +0,0 @@ - - - - - - Unit Checked Arithmetic via Unitful.jl - - - - - - - - - - - - - - - - - -
-
-
- -
-

Unit Checked Arithmetic via Unitful.jl

-
Chris Rackauckas
- -
- -

Units and dimensional analysis are standard tools across the sciences for checking the correctness of your equation. However, most ODE solvers only allow for the equation to be in dimensionless form, leaving it up to the user to both convert the equation to a dimensionless form, punch in the equations, and hopefully not make an error along the way.

-

DifferentialEquations.jl allows for one to use Unitful.jl to have unit-checked arithmetic natively in the solvers. Given the dispatch implementation of the Unitful, this has little overhead.

-

Using Unitful

-

To use Unitful, you need to have the package installed. Then you can add units to your variables. For example:

- - -
-using Unitful
-t = 1.0u"s"
-
- - -
-1.0 s
-
- - -

Notice that t is a variable with units in seconds. If we make another value with seconds, they can add

- - -
-t2 = 1.02u"s"
-t+t2
-
- - -
-2.02 s
-
- - -

and they can multiply:

- - -
-t*t2
-
- - -
-1.02 s^2
-
- - -

You can even do rational roots:

- - -
-sqrt(t)
-
- - -
-1.0 s^1/2
-
- - -

Many operations work. These operations will check to make sure units are correct, and will throw an error for incorrect operations:

- - -
-t + sqrt(t)
-
- - -
-ERROR: DimensionError: 1.0 s and 1.0 s^1/2 are not dimensionally compatible.
-
- - -

Using Unitful with DifferentialEquations.jl

-

Just like with other number systems, you can choose the units for your numbers by simply specifying the units of the initial condition and the timestep. For example, to solve the linear ODE where the variable has units of Newton's and t is in Seconds, we would use:

- - -
-using DifferentialEquations
-f = (y,p,t) -> 0.5*y
-u0 = 1.5u"N"
-prob = ODEProblem(f,u0,(0.0u"s",1.0u"s"))
-sol = solve(prob,Tsit5())
-
- - -
-ERROR: DimensionError: N s^-1 and 0.75 N are not dimensionally compatible.
-
- - -

Notice that we recieved a unit mismatch error. This is correctly so! Remember that for an ODE:

-

\[ -\frac{dy}{dt} = f(t,y) -\]

-

we must have that f is a rate, i.e. f is a change in y per unit time. So we need to fix the units of f in our example to be N/s. Notice that we then do not receive an error if we do the following:

- - -
-f = (y,p,t) -> 0.5*y/3.0u"s"
-prob = ODEProblem(f,u0,(0.0u"s",1.0u"s"))
-sol = solve(prob,Tsit5())
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: 3-element Array{Unitful.Quantity{Float64,𝐓,Unitful.FreeUnits{(s,),𝐓,nothing}},1}:
-                 0.0 s
- 0.14311598261241779 s
-                 1.0 s
-u: 3-element Array{Unitful.Quantity{Float64,𝐋*𝐌*𝐓^-2,Unitful.FreeUnits{(N,),𝐋*𝐌*𝐓^-2,nothing}},1}:
-                1.5 N
- 1.5362091208988309 N
- 1.7720406194871123 N
-
- - -

This gives a a normal solution object. Notice that the values are all with the correct units:

- - -
-print(sol[:])
-
- - -
-Unitful.Quantity{Float64,𝐋*𝐌*𝐓^-2,Unitful.FreeUnits{(N,),𝐋*𝐌*𝐓^-2,nothing}}[1.5 N, 1.53621 N, 1.77204 N]
-
- - -

We can plot the solution by removing the units:

- - -
-using Plots
-gr()
-plot(ustrip(sol.t),ustrip(sol[:]),lw=3)
-
- - - - - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("type_handling","unitful.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\Chris Rackauckas\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 2
-
-
-

Package Information:

-
-
Status `C:\Users\Chris Rackauckas\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.7
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.7.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.8
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.8.1
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/notebook/advanced/beeler_reuter.ipynb b/notebook/advanced/beeler_reuter.ipynb deleted file mode 100644 index 1fa6731e..00000000 --- a/notebook/advanced/beeler_reuter.ipynb +++ /dev/null @@ -1,348 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "## Background\n\n[JuliaDiffEq](https://github.com/JuliaDiffEq) is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). *JuliaDiffEq* provides a large number of explicit and implicit solvers suited for different types of ODE problems. It is possible to reduce a system of partial differential equations into an ODE problem by employing the [method of lines (MOL)](https://en.wikipedia.org/wiki/Method_of_lines). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. [Solving Systems of Stochastic PDEs and using GPUs in Julia](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/) is a brief introduction to MOL and using GPUs to accelerate PDE solving in *JuliaDiffEq*. 
Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl) libraries to run the explicit part of the model on a GPU.\n\nNote that this tutorial does not use the [higher order IMEX methods built into DifferentialEquations.jl](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1) but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios.\n\nThere are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic [Hodgkin-Huxley model](https://en.wikipedia.org/wiki/Hodgkin%E2%80%93Huxley_model) and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE,\n\n$$\\partial V / \\partial t = \\nabla (D \\nabla V) - \\frac {I_\\text{ion}} {C_m},$$\n\nwhere $V$ is the transmembrane potential, $D$ is a diffusion tensor, $I_\\text{ion}$ is the sum of the transmembrane currents and is calculated from the ODEs, and $C_m$ is the membrane capacitance and is usually assumed to be constant. Here we model a uniform and isotropic medium. Therefore, the model can be simplified to,\n\n$$\\partial V / \\partial t = D \\Delta{V} - \\frac {I_\\text{ion}} {C_m},$$\n\nwhere $D$ is now a scalar. By nature, these models have to deal with different time scales and are therefore classified as *stiff*. 
Commonly, they are solved using the explicit Euler method, usually with a closed form for the integration of the gating variables (the Rush-Larsen method, see below). We can also solve these problems using implicit or semi-implicit PDE solvers (e.g., the [Crank-Nicholson method](https://en.wikipedia.org/wiki/Crank%E2%80%93Nicolson_method) combined with an iterative solver). Higher order explicit methods such as Runge-Kutta and linear multi-step methods cannot overcome the stiffness and are not particularly helpful.\n\nIn this tutorial, we first develop a CPU-only IMEX solver and then show how to move the explicit part to a GPU.\n\n### The Beeler-Reuter Model\n\nWe have chosen the [Beeler-Reuter ventricular ionic model](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1283659/) as our example. It is a classic model first described in 1977 and is used as a base for many other ionic models. It has eight state variables, which makes it complicated enough to be interesting without obscuring the main points of the exercise. The eight state variables are: the transmembrane potential ($V$), sodium-channel activation and inactivation gates ($m$ and $h$, similar to the Hodgkin-Huxley model), with an additional slow inactivation gate ($j$), calcium-channel activation and deactivations gates ($d$ and $f$), a time-dependent inward-rectifying potassium current gate ($x_1$), and intracellular calcium concentration ($c$). There are four currents: a sodium current ($i_{Na}$), a calcium current ($i_{Ca}$), and two potassium currents, one time-dependent ($i_{x_1}$) and one background time-independent ($i_{K_1}$).\n\n## CPU-Only Beeler-Reuter Solver\n\nLet's start by developing a CPU only IMEX solver. The main idea is to use the *DifferentialEquations* framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. 
If no analytical approximation was known for the explicit part, one could use methods from [this list](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1).\n\nFirst, we define the model constants:\n# An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model\n### Shahriar Iravanian" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "const v0 = -84.624\nconst v1 = 10.0\nconst C_K1 = 1.0f0\nconst C_x1 = 1.0f0\nconst C_Na = 1.0f0\nconst C_s = 1.0f0\nconst D_Ca = 0.0f0\nconst D_Na = 0.0f0\nconst g_s = 0.09f0\nconst g_Na = 4.0f0\nconst g_NaC = 0.005f0\nconst ENa = 50.0f0 + D_Na\nconst γ = 0.5f0\nconst C_m = 1.0f0" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that the constants are defined as `Float32` and not `Float64`. The reason is that most GPUs have many more single precision cores than double precision ones. To ensure uniformity between CPU and GPU, we also code most states variables as `Float32` except for the transmembrane potential, which is solved by an implicit solver provided by the Sundial library and needs to be `Float64`.\n\n### The State Structure\n\nNext, we define a struct to contain our state. `BeelerReuterCpu` is a functor and we will define a deriv function as its associated function." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "mutable struct BeelerReuterCpu <: Function\n t::Float64 # the last timestep time to calculate Δt\n diff_coef::Float64 # the diffusion-coefficient (coupling strength)\n\n C::Array{Float32, 2} # intracellular calcium concentration\n M::Array{Float32, 2} # sodium current activation gate (m)\n H::Array{Float32, 2} # sodium current inactivation gate (h)\n J::Array{Float32, 2} # sodium current slow inactivaiton gate (j)\n D::Array{Float32, 2} # calcium current activaiton gate (d)\n F::Array{Float32, 2} # calcium current inactivation gate (f)\n XI::Array{Float32, 2} # inward-rectifying potassium current (iK1)\n\n Δu::Array{Float64, 2} # place-holder for the Laplacian\n\n function BeelerReuterCpu(u0, diff_coef)\n self = new()\n\n ny, nx = size(u0)\n self.t = 0.0\n self.diff_coef = diff_coef\n\n self.C = fill(0.0001f0, (ny,nx))\n self.M = fill(0.01f0, (ny,nx))\n self.H = fill(0.988f0, (ny,nx))\n self.J = fill(0.975f0, (ny,nx))\n self.D = fill(0.003f0, (ny,nx))\n self.F = fill(0.994f0, (ny,nx))\n self.XI = fill(0.0001f0, (ny,nx))\n\n self.Δu = zeros(ny,nx)\n\n return self\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Laplacian\n\nThe finite-difference Laplacian is calculated in-place by a 5-point stencil. The Neumann boundary condition is enforced. Note that we could have also used [DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to automate this step." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# 5-point stencil\nfunction laplacian(Δu, u)\n n1, n2 = size(u)\n\n # internal nodes\n for j = 2:n2-1\n for i = 2:n1-1\n @inbounds Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j]\n end\n end\n\n # left/right edges\n for i = 2:n1-1\n @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1]\n @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2]\n end\n\n # top/bottom edges\n for j = 2:n2-1\n @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j]\n @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j]\n end\n\n # corners\n @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1]\n @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1]\n @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2]\n @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### The Rush-Larsen Method\n\nWe use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the [IMEX solvers documentation](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-%28IMEX%29-ODE-1). 
While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest.\n\nThe [Rush-Larsen](https://ieeexplore.ieee.org/document/4122859/) method replaces the explicit Euler integration for the gating variables with direct integration. The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs,\n\n$$\\frac{dg}{dt} = \\alpha(V) (1 - g) - \\beta(V) g$$\n\nwhere $g$ is a generic gating variable, ranging from 0 to 1, and $\\alpha$ and $\\beta$ are reaction rates. This equation can be written as,\n\n$$\\frac{dg}{dt} = (g_{\\infty} - g) / \\tau_g,$$\n\nwhere $g_\\infty$ and $\\tau_g$ are\n\n$$g_{\\infty} = \\frac{\\alpha}{(\\alpha + \\beta)},$$\n\nand,\n\n$$\\tau_g = \\frac{1}{(\\alpha + \\beta)}.$$\n\nAssuing that $g_\\infty$ and $\\tau_g$ are constant for the duration of a single time step ($\\Delta{t}$), which is a reasonable assumption for most cardiac models, we can integrate directly to have,\n\n$$g(t + \\Delta{t}) = g_{\\infty} - \\left(g_{\\infty} - g(\\Delta{t})\\right)\\,e^{-\\Delta{t}/\\tau_g}.$$\n\nThis is the Rush-Larsen technique. Note that as $\\Delta{t} \\rightarrow 0$, this equations morphs into the explicit Euler formula,\n\n$$g(t + \\Delta{t}) = g(t) + \\Delta{t}\\frac{dg}{dt}.$$\n\n`rush_larsen` is a helper function that use the Rush-Larsen method to integrate the gating variables." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@inline function rush_larsen(g, α, β, Δt)\n inf = α/(α+β)\n τ = 1f0 / (α+β)\n return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The gating variables are updated as below. The details of how to calculate $\\alpha$ and $\\beta$ are based on the Beeler-Reuter model and not of direct interest to this tutorial." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_M_cpu(g, v, Δt)\n # the condition is needed here to prevent NaN when v == 47.0\n α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0)\n β = (40.0f0 * exp(-0.056f0*(v+72.0f0)))\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_H_cpu(g, v, Δt)\n α = 0.126f0 * exp(-0.25f0*(v+77.0f0))\n β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_J_cpu(g, v, Δt)\n α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0)\n β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_D_cpu(g, v, Δt)\n α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0)\n β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_F_cpu(g, v, Δt)\n α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0)\n β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_XI_cpu(g, v, Δt)\n α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0)\n β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The intracelleular calcium is not technically a gating variable, but we can use a similar explicit exponential integrator for it." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_C_cpu(g, d, f, v, Δt)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g)\n kCa = C_s * g_s * d * f\n iCa = kCa * (v - ECa)\n inf = 1.0f-7 * (0.07f0 - g)\n τ = 1f0 / 0.07f0\n return g + (g - inf) * expm1(-Δt/τ)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Implicit Solver\n\nNow, it is time to define the derivative function as an associated function of **BeelerReuterCpu**. We plan to use the CVODE_BDF solver as our implicit portion. Similar to other iterative methods, it calls the deriv function with the same $t$ multiple times. For example, these are consecutive $t$s from a representative run:\n\n0.86830\n0.86830\n0.85485\n0.85485\n0.85485\n0.86359\n0.86359\n0.86359\n0.87233\n0.87233\n0.87233\n0.88598\n...\n\nHere, every time step is called three times. We distinguish between two types of calls to the deriv function. When $t$ changes, the gating variables are updated by calling `update_gates_cpu`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt)\n let Δt = Float32(Δt)\n n1, n2 = size(u)\n for j = 1:n2\n for i = 1:n1\n v = Float32(u[i,j])\n\n XI[i,j] = update_XI_cpu(XI[i,j], v, Δt)\n M[i,j] = update_M_cpu(M[i,j], v, Δt)\n H[i,j] = update_H_cpu(H[i,j], v, Δt)\n J[i,j] = update_J_cpu(J[i,j], v, Δt)\n D[i,j] = update_D_cpu(D[i,j], v, Δt)\n F[i,j] = update_F_cpu(F[i,j], v, Δt)\n\n C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt)\n end\n end\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "On the other hand, du is updated at each time step, since it is independent of $\\Delta{t}$." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# iK1 is the inward-rectifying potassium current\nfunction calc_iK1(v)\n ea = exp(0.04f0*(v+85f0))\n eb = exp(0.08f0*(v+53f0))\n ec = exp(0.04f0*(v+53f0))\n ed = exp(-0.04f0*(v+23f0))\n return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)\n + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))\nend\n\n# ix1 is the time-independent background potassium current\nfunction calc_ix1(v, xi)\n ea = exp(0.04f0*(v+77f0))\n eb = exp(0.04f0*(v+35f0))\n return xi * 0.8f0 * (ea-1f0) / eb\nend\n\n# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)\nfunction calc_iNa(v, m, h, j)\n return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)\nend\n\n# iCa is the calcium current\nfunction calc_iCa(v, d, f, c)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c) # ECa is the calcium reversal potential\n return C_s * g_s * d * f * (v - ECa)\nend\n\nfunction update_du_cpu(du, u, XI, M, H, J, D, F, C)\n n1, n2 = size(u)\n\n for j = 1:n2\n for i = 1:n1\n v = Float32(u[i,j])\n\n # calculating individual currents\n iK1 = calc_iK1(v)\n ix1 = calc_ix1(v, XI[i,j])\n iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])\n iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])\n\n # total current\n I_sum = iK1 + ix1 + iNa + iCa\n\n # the reaction part of the reaction-diffusion equation\n du[i,j] = -I_sum / C_m\n end\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Finally, we put everything together is our deriv function, which is a call on `BeelerReuterCpu`." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function (f::BeelerReuterCpu)(du, u, p, t)\n Δt = t - f.t\n\n if Δt != 0 || t == 0\n update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt)\n f.t = t\n end\n\n laplacian(f.Δu, u)\n\n # calculate the reaction portion\n update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C)\n\n # ...add the diffusion portion\n du .+= f.diff_coef .* f.Δu\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Results\n\nTime to test! We need to define the starting transmembrane potential with the help of global constants **v0** and **v1**, which represent the resting and activated potentials." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "const N = 192;\nu0 = fill(v0, (N, N));\nu0[90:102,90:102] .= v1; # a small square in the middle of the domain" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The initial condition is a small square in the middle of the domain." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots\nheatmap(u0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Next, the problem is defined:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Sundials\n\nderiv_cpu = BeelerReuterCpu(u0, 1.0);\nprob = ODEProblem(deriv_cpu, u0, (0.0, 50.0));" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "For stiff reaction-diffusion equations, CVODE_BDF from Sundial library is an excellent solver." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "heatmap(sol.u[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## CPU/GPU Beeler-Reuter Solver\n\nGPUs are great for embarrassingly parallel problems but not so much for highly coupled models. We plan to keep the implicit part on CPU and run the decoupled explicit code on a GPU with the help of the CUDAnative library.\n\n### GPUs and CUDA\n\nIt this section, we present a brief summary of how GPUs (specifically NVIDIA GPUs) work and how to program them using the Julia CUDA interface. The readers who are familiar with these basic concepts may skip this section.\n\nLet's start by looking at the hardware of a typical high-end GPU, GTX 1080. It has four Graphics Processing Clusters (equivalent to a discrete CPU), each harboring five Streaming Multiprocessor (similar to a CPU core). Each SM has 128 single-precision CUDA cores. Therefore, GTX 1080 has a total of 4 x 5 x 128 = 2560 CUDA cores. The maximum theoretical throughput for a GTX 1080 is reported as 8.87 TFLOPS. This figure is calculated for a boost clock frequency of 1.733 MHz as 2 x 2560 x 1.733 MHz = 8.87 TFLOPS. The factor 2 is included because two single floating point operations, a multiplication and an addition, can be done in a clock cycle as part of a fused-multiply-addition FMA operation. GTX 1080 also has 8192 MB of global memory accessible to all the cores (in addition to local and shared memory on each SM).\n\nA typical CUDA application has the following flow:\n\n1. Define and initialize the problem domain tensors (multi-dimensional arrays) in CPU memory.\n2. Allocate corresponding tensors in the GPU global memory.\n3. 
Transfer the input tensors from CPU to the corresponding GPU tensors.\n4. Invoke CUDA kernels (i.e., the GPU functions callable from CPU) that operate on the GPU tensors.\n5. Transfer the result tensors from GPU back to CPU.\n6. Process tensors on CPU.\n7. Repeat steps 3-6 as needed.\n\nSome libraries, such as [ArrayFire](https://github.com/arrayfire/arrayfire), hide the complexicities of steps 2-5 behind a higher level of abstraction. However, here we take a lower level route. By using [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl), we achieve a finer-grained control and higher performance. In return, we need to implement each step manually.\n\n*CuArray* is a thin abstraction layer over the CUDA API and allows us to define GPU-side tensors and copy data to and from them but does not provide for operations on tensors. *CUDAnative* is a compiler that translates Julia functions designated as CUDA kernels into ptx (a high-level CUDA assembly language).\n\n### The CUDA Code\n\nThe key to fast CUDA programs is to minimize CPU/GPU memory transfers and global memory accesses. The implicit solver is currently CPU only, but it only needs access to the transmembrane potential. The rest of state variables reside on the GPU memory.\n\nWe modify ``BeelerReuterCpu`` into ``BeelerReuterGpu`` by defining the state variables as *CuArray*s instead of standard Julia *Array*s. The name of each variable defined on GPU is prefixed by *d_* for clarity. Note that $\\Delta{v}$ is a temporary storage for the Laplacian and stays on the CPU side." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using CUDAnative, CuArrays\n\nmutable struct BeelerReuterGpu <: Function\n t::Float64 # the last timestep time to calculate Δt\n diff_coef::Float64 # the diffusion-coefficient (coupling strength)\n\n d_C::CuArray{Float32, 2} # intracellular calcium concentration\n d_M::CuArray{Float32, 2} # sodium current activation gate (m)\n d_H::CuArray{Float32, 2} # sodium current inactivation gate (h)\n d_J::CuArray{Float32, 2} # sodium current slow inactivaiton gate (j)\n d_D::CuArray{Float32, 2} # calcium current activaiton gate (d)\n d_F::CuArray{Float32, 2} # calcium current inactivation gate (f)\n d_XI::CuArray{Float32, 2} # inward-rectifying potassium current (iK1)\n\n d_u::CuArray{Float64, 2} # place-holder for u in the device memory\n d_du::CuArray{Float64, 2} # place-holder for d_u in the device memory\n\n Δv::Array{Float64, 2} # place-holder for voltage gradient\n\n function BeelerReuterGpu(u0, diff_coef)\n self = new()\n\n ny, nx = size(u0)\n @assert (nx % 16 == 0) && (ny % 16 == 0)\n self.t = 0.0\n self.diff_coef = diff_coef\n\n self.d_C = CuArray(fill(0.0001f0, (ny,nx)))\n self.d_M = CuArray(fill(0.01f0, (ny,nx)))\n self.d_H = CuArray(fill(0.988f0, (ny,nx)))\n self.d_J = CuArray(fill(0.975f0, (ny,nx)))\n self.d_D = CuArray(fill(0.003f0, (ny,nx)))\n self.d_F = CuArray(fill(0.994f0, (ny,nx)))\n self.d_XI = CuArray(fill(0.0001f0, (ny,nx)))\n\n self.d_u = CuArray(u0)\n self.d_du = CuArray(zeros(ny,nx))\n\n self.Δv = zeros(ny,nx)\n\n return self\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The Laplacian function remains unchanged. The main change to the explicit gating solvers is that *exp* and *expm1* functions are prefixed by *CUDAnative.*. This is a technical nuisance that will hopefully be resolved in future." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function rush_larsen_gpu(g, α, β, Δt)\n inf = α/(α+β)\n τ = 1.0/(α+β)\n return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0)\nend\n\nfunction update_M_gpu(g, v, Δt)\n # the condition is needed here to prevent NaN when v == 47.0\n α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0)\n β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0)))\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_H_gpu(g, v, Δt)\n α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0))\n β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_J_gpu(g, v, Δt)\n α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0)\n β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_D_gpu(g, v, Δt)\n α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0)\n β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_F_gpu(g, v, Δt)\n α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0)\n β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_XI_gpu(g, v, Δt)\n α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0)\n β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_C_gpu(c, d, f, v, Δt)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c)\n kCa = C_s * g_s * d * f\n iCa = kCa * (v - ECa)\n inf = 1.0f-7 * (0.07f0 - c)\n τ = 1f0 / 0.07f0\n return c + 
(c - inf) * CUDAnative.expm1(-Δt/τ)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Similarly, we modify the functions to calculate the individual currents by adding CUDAnative prefix." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# iK1 is the inward-rectifying potassium current\nfunction calc_iK1(v)\n ea = CUDAnative.exp(0.04f0*(v+85f0))\n eb = CUDAnative.exp(0.08f0*(v+53f0))\n ec = CUDAnative.exp(0.04f0*(v+53f0))\n ed = CUDAnative.exp(-0.04f0*(v+23f0))\n return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)\n + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))\nend\n\n# ix1 is the time-independent background potassium current\nfunction calc_ix1(v, xi)\n ea = CUDAnative.exp(0.04f0*(v+77f0))\n eb = CUDAnative.exp(0.04f0*(v+35f0))\n return xi * 0.8f0 * (ea-1f0) / eb\nend\n\n# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)\nfunction calc_iNa(v, m, h, j)\n return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)\nend\n\n# iCa is the calcium current\nfunction calc_iCa(v, d, f, c)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) # ECa is the calcium reversal potential\n return C_s * g_s * d * f * (v - ECa)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### CUDA Kernels\n\nA CUDA program does not directly deal with GPCs and SMs. The logical view of a CUDA program is in the term of *blocks* and *threads*. We have to specify the number of block and threads when running a CUDA *kernel*. Each thread runs on a single CUDA core. Threads are logically bundled into blocks, which are in turn specified on a grid. The grid stands for the entirety of the domain of interest.\n\nEach thread can find its logical coordinate by using few pre-defined indexing variables (*threadIdx*, *blockIdx*, *blockDim* and *gridDim*) in C/C++ and the corresponding functions (e.g., `threadIdx()`) in Julia. 
There variables and functions are defined automatically for each thread and may return a different value depending on the calling thread. The return value of these functions is a 1, 2, or 3 dimensional structure whose elements can be accessed as `.x`, `.y`, and `.z` (for a 1-dimensional case, `.x` reports the actual index and `.y` and `.z` simply return 1). For example, if we deploy a kernel in 128 blocks and with 256 threads per block, each thread will see\n\n```\n gridDim.x = 128;\n blockDim=256;\n```\n\nwhile `blockIdx.x` ranges from 0 to 127 in C/C++ and 1 to 128 in Julia. Similarly, `threadIdx.x` will be between 0 to 255 in C/C++ (of course, in Julia the range will be 1 to 256).\n\nA C/C++ thread can calculate its index as\n\n```\n int idx = blockDim.x * blockIdx.x + threadIdx.x;\n```\n\nIn Julia, we have to take into account base 1. Therefore, we use the following formula\n\n```\n idx = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x\n```\n\nA CUDA programmer is free to interpret the calculated index however it fits the application, but in practice, it is usually interpreted as an index into input tensors.\n\nIn the GPU version of the solver, each thread works on a single element of the medium, indexed by a (x,y) pair.\n`update_gates_gpu` and `update_du_gpu` are very similar to their CPU counterparts but are in fact CUDA kernels where the *for* loops are replaced with CUDA specific indexing. Note that CUDA kernels cannot return a valve; hence, *nothing* at the end." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt)\n i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x\n j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y\n\n v = Float32(u[i,j])\n\n let Δt = Float32(Δt)\n XI[i,j] = update_XI_gpu(XI[i,j], v, Δt)\n M[i,j] = update_M_gpu(M[i,j], v, Δt)\n H[i,j] = update_H_gpu(H[i,j], v, Δt)\n J[i,j] = update_J_gpu(J[i,j], v, Δt)\n D[i,j] = update_D_gpu(D[i,j], v, Δt)\n F[i,j] = update_F_gpu(F[i,j], v, Δt)\n\n C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt)\n end\n nothing\nend\n\nfunction update_du_gpu(du, u, XI, M, H, J, D, F, C)\n i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x\n j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y\n\n v = Float32(u[i,j])\n\n # calculating individual currents\n iK1 = calc_iK1(v)\n ix1 = calc_ix1(v, XI[i,j])\n iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])\n iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])\n\n # total current\n I_sum = iK1 + ix1 + iNa + iCa\n\n # the reaction part of the reaction-diffusion equation\n du[i,j] = -I_sum / C_m\n nothing\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Implicit Solver\n\nFinally, the deriv function is modified to copy *u* to GPU and copy *du* back and to invoke CUDA kernels." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function (f::BeelerReuterGpu)(du, u, p, t)\n L = 16 # block size\n Δt = t - f.t\n copyto!(f.d_u, u)\n ny, nx = size(u)\n\n if Δt != 0 || t == 0\n @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu(\n f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt)\n f.t = t\n end\n\n laplacian(f.Δv, u)\n\n # calculate the reaction portion\n @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu(\n f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C)\n\n copyto!(du, f.d_du)\n\n # ...add the diffusion portion\n du .+= f.diff_coef .* f.Δv\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Ready to test!" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Sundials\n\nderiv_gpu = BeelerReuterGpu(u0, 1.0);\nprob = ODEProblem(deriv_gpu, u0, (0.0, 50.0));\n@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "heatmap(sol.u[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Summary\n\nWe achieve around a 6x speedup with running the explicit portion of our IMEX solver on a GPU. The major bottleneck of this technique is the communication between CPU and GPU. In its current form, not all of the internals of the method utilize GPU acceleration. In particular, the implicit equations solved by GMRES are performed on the CPU. This partial CPU nature also increases the amount of data transfer that is required between the GPU and CPU (performed every f call). Compiling the full ODE solver to the GPU would solve both of these issues and potentially give a much larger speedup. 
[JuliaDiffEq developers are currently working on solutions to alleviate these issues](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/), but these will only be compatible with native Julia solvers (and not Sundials)." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/callbacks_and_events.ipynb b/notebook/introduction/callbacks_and_events.ipynb deleted file mode 100644 index 61d62b35..00000000 --- a/notebook/introduction/callbacks_and_events.ipynb +++ /dev/null @@ -1,551 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "In working with a differential equation, our system will evolve through many states. Particular states of the system may be of interest to us, and we say that an ***\"event\"*** is triggered when our system reaches these states. For example, events may include the moment when our system reaches a particular temperature or velocity. We ***handle*** these events with ***callbacks***, which tell us what to do once an event has been triggered.\n\nThese callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserve conservation laws and save the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers.\n\nThis tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the [Event Handling and Callback Functions](http://docs.juliadiffeq.org/latest/features/callback_functions.html) page of the documentation. 
We will also introduce you to some of the most widely used callbacks in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html), which is a library of pre-built mods.\n\n## Events and Continuous Callbacks\n\nEvent handling is done through continuous callbacks. Callbacks take a function, `condition`, which triggers an `affect!` when `condition == 0`. These callbacks are called \"continuous\" because they will utilize rootfinding on the interpolation to find the \"exact\" time point at which the condition takes place and apply the `affect!` at that time point.\n\n***Let's use a bouncing ball as a simple system to explain events and callbacks.*** Let's take Newton's model of a ball falling towards the Earth's surface via a gravitational constant `g`. In this case, the velocity is changing via `-g`, and position is changing via the velocity. Therefore we receive the system of ODEs:\n# Callbacks and Events\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, ParameterizedFunctions\nball! = @ode_def BallBounce begin\n dy = v\n dv = -g\nend g" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We want the callback to trigger when `y=0` since that's when the ball will hit the Earth's surface (our event). We do this with the condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function condition(u,t,integrator)\n u[1]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Recall that the `condition` will trigger when it evaluates to zero, and here it will evaluate to zero when `u[1] == 0`, which occurs when `v == 0`. *Now we have to say what we want the callback to do.* Callbacks make use of the [Integrator Interface](http://docs.juliadiffeq.org/latest/basics/integrator.html). 
Instead of giving a full description, a quick and usable rundown is:\n\n- Values are strored in `integrator.u`\n- Times are stored in `integrator.t`\n- The parameters are stored in `integrator.p`\n- `integrator(t)` performs an interpolation in the current interval between `integrator.tprev` and `integrator.t` (and allows extrapolation)\n- User-defined options (tolerances, etc.) are stored in `integrator.opts`\n- `integrator.sol` is the current solution object. Note that `integrator.sol.prob` is the current problem\n\nWhile there's a lot more on the integrator interface page, that's a working knowledge of what's there.\n\nWhat we want to do with our `affect!` is to \"make the ball bounce\". Mathematically speaking, the ball bounces when the sign of the velocity flips. As an added behavior, let's also use a small friction constant to dampen the ball's velocity. This way only a percentage of the velocity will be retained when the event is triggered and the callback is used. We'll define this behavior in the `affect!` function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function affect!(integrator)\n integrator.u[2] = -integrator.p[2] * integrator.u[2]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "`integrator.u[2]` is the second value of our model, which is `v` or velocity, and `integrator.p[2]`, is our friction coefficient.\n\nTherefore `affect!` can be read as follows: `affect!` will take the current value of velocity, and multiply it `-1` multiplied by our friction coefficient. 
Therefore the ball will change direction and its velocity will dampen when `affect!` is called.\n\nNow let's build the `ContinuousCallback`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bounce_cb = ContinuousCallback(condition,affect!)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's make an `ODEProblem` which has our callback:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [50.0,0.0]\ntspan = (0.0,15.0)\np = (9.8,0.9)\nprob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we chose a friction constant of `0.9`. Now we can solve the problem and plot the solution as we normally would:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and tada, the ball bounces! Notice that the `ContinuousCallback` is using the interpolation to apply the effect \"exactly\" when `v == 0`. This is crucial for model correctness, and thus when this property is needed a `ContinuousCallback` should be used.\n\n#### Exercise 1\n\nIn our example we used a constant coefficient of friction, but if we are bouncing the ball in the same place we may be smoothing the surface (say, squishing the grass), causing there to be less friction after each bounce. In this more advanced model, we want the friction coefficient at the next bounce to be `sqrt(friction)` from the previous bounce (since `friction < 1`, `sqrt(friction) > friction` and `sqrt(friction) < 1`).\n\nHint: there are many ways to implement this. 
One way to do it is to make `p` a `Vector` and mutate the friction coefficient in the `affect!`.\n\n## Discrete Callbacks\n\nA discrete callback checks a `condition` after every integration step and, if true, it will apply an `affect!`. For example, let's say that at time `t=2` we want to include that a kid kicked the ball, adding `20` to the current velocity. This kind of situation, where we want to add a specific behavior which does not require rootfinding, is a good candidate for a `DiscreteCallback`. In this case, the `condition` is a boolean for whether to apply the `affect!`, so:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function condition_kick(u,t,integrator)\n t == 2\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We want the kick to occur at `t=2`, so we check for that time point. When we are at this time point, we want to do:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function affect_kick!(integrator)\n integrator.u[2] += 50\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we build the problem as before:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "kick_cb = DiscreteCallback(condition_kick,affect_kick!)\nu0 = [50.0,0.0]\ntspan = (0.0,10.0)\np = (9.8,0.9)\nprob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that, since we are requiring our effect at exactly the time `t=2`, we need to tell the integration scheme to step at exactly `t=2` to apply this callback. This is done via the option `tstops`, which is like `saveat` but means \"stop at these values\"." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),tstops=[2.0])\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that this example could've been done with a `ContinuousCallback` by checking the condition `t-2`.\n\n## Merging Callbacks with Callback Sets\n\nIn some cases you may want to merge callbacks to build up more complex behavior. In our previous result, notice that the model is unphysical because the ball goes below zero! What we really need to do is add the bounce callback together with the kick. This can be achieved through the `CallbackSet`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "cb = CallbackSet(bounce_cb,kick_cb)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "A `CallbackSet` merges their behavior together. The logic is as follows. In a given interval, if there are multiple continuous callbacks that would trigger, only the one that triggers at the earliest time is used. The time is pulled back to where that continuous callback is triggered, and then the `DiscreteCallback`s in the callback set are called in order." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [50.0,0.0]\ntspan = (0.0,15.0)\np = (9.8,0.9)\nprob = ODEProblem(ball!,u0,tspan,p,callback=cb)\nsol = solve(prob,Tsit5(),tstops=[2.0])\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we have now merged the behaviors. We can then nest this as deep as we like.\n\n#### Exercise 2\n\nAdd to the model a linear wind with resistance that changes the acceleration to `-g + k*v` after `t=10`. 
Do so by adding another parameter and allowing it to be zero until a specific time point where a third callback triggers the change.\n\n## Integration Termination and Directional Handling\n\nLet's look at another model now: the model of the [Harmonic Oscillator](https://en.wikipedia.org/wiki/Harmonic_oscillator). We can write this as:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.,0.]\nharmonic! = @ode_def HarmonicOscillator begin\n dv = -x\n dx = v\nend\ntspan = (0.0,10.0)\nprob = ODEProblem(harmonic!,u0,tspan)\nsol = solve(prob)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's instead stop the integration when a condition is met. From the [Integrator Interface stepping controls](http://docs.juliadiffeq.org/latest/basics/integrator.html#Stepping-Controls-1) we see that `terminate!(integrator)` will cause the integration to end. So our new `affect!` is simply:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function terminate_affect!(integrator)\n terminate!(integrator)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's first stop the integration when the particle moves back to `x=0`. This means we want to use the condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function terminate_condition(u,t,integrator)\n u[2]\nend\nterminate_cb = ContinuousCallback(terminate_condition,terminate_affect!)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that instead of adding callbacks to the problem, we can also add them to the `solve` command. This will automatically form a `CallbackSet` with any problem-related callbacks and naturally allows you to distinguish between model features and integration controls." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,callback=terminate_cb)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that the harmonic oscilator's true solution here is `sin` and `cosine`, and thus we would expect this return to zero to happen at `t=π`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.t[end]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is one way to approximate π! Lower tolerances and arbitrary precision numbers can make this more exact, but let's not look at that. Instead, what if we wanted to halt the integration after exactly one cycle? To do so we would need to ignore the first zero-crossing. Luckily in these types of scenarios there's usually a structure to the problem that can be exploited. Here, we only want to trigger the `affect!` when crossing from positive to negative, and not when crossing from negative to positive. In other words, we want our `affect!` to only occur on upcrossings.\n\nIf the `ContinuousCallback` constructor is given a single `affect!`, it will occur on both upcrossings and downcrossings. If there are two `affect!`s given, then the first is for upcrossings and the second is for downcrossings. An `affect!` can be ignored by using `nothing`. Together, the \"upcrossing-only\" version of the effect means that the first `affect!` is what we defined above and the second is `nothing`. 
Therefore we want:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Which gives us:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,callback=terminate_upcrossing_cb)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Callback Library\n\nAs you can see, callbacks can be very useful and through `CallbackSets` we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html). We will walk through a few examples where these callbacks can come in handy.\n\n### Manifold Projection\n\nOne callback is the manifold projection callback. Essentially, you can define any manifold `g(sol)=0` which the solution must live on, and cause the integration to project to that manifold after every step. As an example, let's see what happens if we naively run the harmonic oscillator for a long time:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "tspan = (0.0,10000.0)\nprob = ODEProblem(harmonic!,u0,tspan)\nsol = solve(prob)\ngr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points!\nplot(sol,vars=(1,2))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(0,1),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that what's going on is that the numerical solution is drifting from the true solution over this long time scale. This is because the integrator is not conserving energy." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Some integration techniques like [symplectic integrators](http://docs.juliadiffeq.org/latest/solvers/dynamical_solve.html#Symplectic-Integrators-1) are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function g(resid,u,p,t)\n resid[1] = u[2]^2 + u[1]^2 - 1\n resid[2] = 0\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here the residual measures how far from our desired energy we are, and the number of conditions matches the size of our system (we ignored the second one by making the residual 0). Thus we define a `ManifoldProjection` callback and add that to the solver:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "cb = ManifoldProjection(g)\nsol = solve(prob,callback=cb)\nplot(sol,vars=(1,2))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(0,1),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we have \"perfect\" energy conservation, where if it's ever violated too much the solution will get projected back to `energy=1`." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u1,u2 = sol[500]\nu2^2 + u1^2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the [`PositiveCallback()`](http://docs.juliadiffeq.org/latest/features/callback_library.html#PositiveDomain-1) which can be used to enforce positivity of the variables.\n\n### SavingCallback\n\nThe `SavingCallback` can be used to allow for special saving behavior. Let's take a linear ODE define on a system of 1000x1000 matrices:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In fields like quantum mechanics you may only want to know specific properties of the solution such as the trace or the norm of the matrix. Saving all of the 1000x1000 matrices can be a costly way to get this information! Instead, we can use the `SavingCallback` to save the `trace` and `norm` at specified times. To do so, we first define our `SavedValues` cache. 
Our time is in terms of `Float64`, and we want to save tuples of `Float64`s (one for the `trace` and one for the `norm`), and thus we generate the cache as:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values = SavedValues(Float64, Tuple{Float64,Float64})" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we define the `SavingCallback` by giving it a function of `(u,p,t,integrator)` that returns the values to save, and the cache:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using LinearAlgebra\ncb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here we take `u` and save `(tr(u),norm(u))`. When we solve with this callback:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Our values are stored in our `saved_values` variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values.t" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values.saveval" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "By default this happened only at the solver's steps. But the `SavingCallback` has similar controls as the integrator. 
For example, if we want to save every `0.1` seconds, we can do so using `saveat`:
The corresponding page of the documentation is the [ODE Solvers](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html) page which goes into more depth.\n\n## Diagnosing Stiffness\n\nOne of the key things to know for algorithm choices is whether your problem is stiff. Let's take for example the driven Van Der Pol equation:\n# Choosing an ODE Algorithm\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, ParameterizedFunctions\nvan! = @ode_def VanDerPol begin\n dy = μ*((1-x^2)*y - x)\n dx = 1*y\nend μ\n\nprob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "One indicating factor that should alert you to the fact that this model may be stiff is the fact that the parameter is `1e6`: large parameters generally mean stiff models. If we try to solve this with the default method:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here it shows that maximum iterations were reached. Another thing that can happen is that the solution can return that the solver was unstable (exploded to infinity) or that `dt` became too small. If these happen, the first thing to do is to check that your model is correct. It could very well be that you made an error that causes the model to be unstable!\n\nIf the model is the problem, then stiffness could be the reason. We can thus hint to the solver to use an appropriate method:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,alg_hints = [:stiff])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Or we can use the default algorithm. 
By default, DifferentialEquations.jl uses algorithms like `AutoTsit5(Rodas5())` which automatically detect stiffness and switch to an appropriate method once stiffness is known." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Another way to understand stiffness is to look at the solution." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots; gr()\nsol = solve(prob,alg_hints = [:stiff],reltol=1e-6)\nplot(sol,denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's zoom in on the y-axis to see what's going on:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,ylims = (-10.0,10.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice how there are some extreme vertical shifts that occur. These vertical shifts are places where the derivative term is very large, and this is indicative of stiffness. This is an extreme example to highlight the behavior, but this general idea can be carried over to your problem. When in doubt, simply try timing using both a stiff solver and a non-stiff solver and see which is more efficient.\n\nTo try this out, let's use BenchmarkTools, a package that let's us relatively reliably time code blocks." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz!(du,u,p,t)\n σ,ρ,β = p\n du[1] = σ*(u[2]-u[1])\n du[2] = u[1]*(ρ-u[3]) - u[2]\n du[3] = u[1]*u[2] - β*u[3]\nend\nu0 = [1.0,0.0,0.0]\np = (10,28,8/3)\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz!,u0,tspan,p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "And now, let's use the `@btime` macro from benchmark tools to compare the use of non-stiff and stiff solvers on this problem." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using BenchmarkTools\n@btime solve(prob);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@btime solve(prob,alg_hints = [:stiff]);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In this particular case, we can see that non-stiff solvers get us to the solution much more quickly.\n\n## The Recommended Methods\n\nWhen picking a method, the general rules are as follows:\n\n- Higher order is more efficient at lower tolerances, lower order is more efficient at higher tolerances\n- Adaptivity is essential in most real-world scenarios\n- Runge-Kutta methods do well with non-stiff equations, Rosenbrock methods do well with small stiff equations, BDF methods do well with large stiff equations\n\nWhile there are always exceptions to the rule, those are good guiding principles. 
Based on those, a simple way to choose methods is:\n\n- The default is `Tsit5()`, a non-stiff Runge-Kutta method of Order 5\n- If you use low tolerances (`1e-8`), try `Vern7()` or `Vern9()`\n- If you use high tolerances, try `BS3()`\n- If the problem is stiff, try `Rosenbrock23()`, `Rodas5()`, or `CVODE_BDF()`\n- If you don't know, use `AutoTsit5(Rosenbrock23())` or `AutoVern9(Rodas5())`.\n\n(This is a simplified version of the default algorithm chooser)\n\n## Comparison to other Software\n\nIf you are familiar with MATLAB, SciPy, or R's DESolve, here's a quick translation start to have transfer your knowledge over.\n\n- `ode23` -> `BS3()`\n- `ode45`/`dopri5` -> `DP5()`, though in most cases `Tsit5()` is more efficient\n- `ode23s` -> `Rosenbrock23()`, though in most cases `Rodas4()` is more efficient\n- `ode113` -> `VCABM()`, though in many cases `Vern7()` is more efficient\n- `dop853` -> `DP8()`, though in most cases `Vern7()` is more efficient\n- `ode15s`/`vode` -> `QNDF()`, though in many cases `CVODE_BDF()`, `Rodas4()`\n or `radau()` are more efficient\n- `ode23t` -> `Trapezoid()` for efficiency and `GenericTrapezoid()` for robustness\n- `ode23tb` -> `TRBDF2`\n- `lsoda` -> `lsoda()` (requires `]add LSODA; using LSODA`)\n- `ode15i` -> `IDA()`, though in many cases `Rodas4()` can handle the DAE and is\n significantly more efficient" - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/formatting_plots.ipynb b/notebook/introduction/formatting_plots.ipynb deleted file mode 100644 index 7aad7138..00000000 --- a/notebook/introduction/formatting_plots.ipynb +++ /dev/null @@ -1,211 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "Since the plotting 
functionality is implemented as a recipe to Plots.jl, [all of the options open to Plots.jl can be used in our plots](https://juliaplots.github.io/supported/). In addition, there are special features specifically for [differential equation plots](http://docs.juliadiffeq.org/latest/basics/plot.html). This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling `solve` on the problem, and `plot` on the solution:\n# Formatting Plots\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Plots, ParameterizedFunctions\ngr()\nlorenz = @ode_def Lorenz begin\n dx = σ*(y-x)\n dy = ρ*x-y-x*z\n dz = x*y-β*z\nend σ β ρ\n\np = [10.0,8/3,28]\nu0 = [1., 5., 10.]\ntspan = (0., 100.)\nprob = ODEProblem(lorenz, u0, tspan, p)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's change it to a phase plot. As discussed in the [plot functions page](http://docs.juliadiffeq.org/latest/basics/plot.html), we can use the `vars` command to choose the variables to plot. 
Let's plot variable `x` vs variable `y` vs variable `z`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(:x,:y,:z))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can also choose to plot the timeseries for a single variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=[:x])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we were able to use the variable names because we had defined the problem with the macro. But in general, we can use the indices. The previous plots would be:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3))\nplot(sol,vars=[1])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Common options are to add titles, axis, and labels. For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,linewidth=5,title=\"Solution to the linear ODE with a thick line\",\nxaxis=\"Time (t)\",yaxis=\"u(t) (in mm)\",label=[\"X\",\"Y\",\"Z\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that series recipes apply to the solution type as well. For example, we can use a scatter plot on the timeseries:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "scatter(sol,vars=[:x])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This shows that the recipe is using the interpolation to smooth the plot. 
It becomes abundantly clear when we turn it off using `denseplot=false`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "When this is done, only the values the timestep hits are plotted. Using the interpolation usually results in a much nicer looking plot so it's recommended, and since the interpolations have similar orders to the numerical methods, their results are trustworthy on the full interval. We can control the number of points used in the interpolation's plot using the `plotdensity` command:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),plotdensity=100)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That's plotting the entire solution using 100 points spaced evenly in time." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),plotdensity=10000)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That's more like it! By default it uses `100*length(sol)`, where the length is the number of internal steps it had to take. This heuristic usually does well, but unusually difficult equations it can be relaxed (since it will take small steps), and for equations with events / discontinuities raising the plot density can help resolve the discontinuity.\n\nLastly notice that we can compose plots. Let's show where the 100 points are using a scatter plot:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3))\nscatter!(sol,vars=(1,2,3),plotdensity=100)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can instead work with an explicit plot object. 
This form can be better for building a complex plot in a loop." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = plot(sol,vars=(1,2,3))\nscatter!(p,sol,vars=(1,2,3),plotdensity=100)\ntitle!(\"I added a title\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "You can do all sorts of things. Have fun!" - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/ode_introduction.ipynb b/notebook/introduction/ode_introduction.ipynb deleted file mode 100644 index 8f2b7997..00000000 --- a/notebook/introduction/ode_introduction.ipynb +++ /dev/null @@ -1,716 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "## Basic Introduction Via Ordinary Differential Equations\n\nThis notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the [ODE tutorial](http://docs.juliadiffeq.org/latest/tutorials/ode_example.html). While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned.\n\n### Background\n\nIf you are new to the study of differential equations, it can be helpful to do a quick background read on [the definition of ordinary differential equations](https://en.wikipedia.org/wiki/Ordinary_differential_equation). 
We define an ordinary differential equation as an equation which describes the way that a variable $u$ changes, that is\n\n$$u' = f(u,p,t)$$\n\nwhere $p$ are the parameters of the model, $t$ is the time variable, and $f$ is the nonlinear model of how $u$ changes. The initial value problem also includes the information about the starting value:\n\n$$u(t_0) = u_0$$\n\nTogether, if you know the starting value and you know how the value will change with time, then you know what the value will be at any time point in the future. This is the intuitive definition of a differential equation.\n\n### First Model: Exponential Growth\n\nOur first model will be the canonical exponential growth model. This model says that the rate of change is proportional to the current value, and is this:\n\n$$u' = au$$\n\nwhere we have a starting value $u(0)=u_0$. Let's say we put 1 dollar into Bitcoin which is increasing at a rate of $98\\%$ per year. Then calling now $t=0$ and measuring time in years, our model is:\n\n$$u' = 0.98u$$\n\nand $u(0) = 1.0$. We encode this into Julia by noticing that, in this setup, we match the general form when\n# An Intro to DifferentialEquations.jl\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f(u,p,t) = 0.98u" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "with $ u_0 = 1.0 $. If we want to solve this model on a time span from `t=0.0` to `t=1.0`, then we define an `ODEProblem` by specifying this function `f`, this initial condition `u0`, and this time span as follows:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nf(u,p,t) = 0.98u\nu0 = 1.0\ntspan = (0.0,1.0)\nprob = ODEProblem(f,u0,tspan)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To solve our `ODEProblem` we use the command `solve`." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and that's it: we have succesfully solved our first ODE!\n\n#### Analyzing the Solution\n\nOf course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the [Solution Handling](http://docs.juliadiffeq.org/latest/basics/solution.html) page. Here we will describe some of the basics. You can plot the solution using the plot recipe provided by [Plots.jl](http://docs.juliaplots.org/latest/):" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the [Plots.jl attributes](http://docs.juliaplots.org/latest/attributes/). For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,linewidth=5,title=\"Solution to the linear ODE with a thick line\",\n xaxis=\"Time (t)\",yaxis=\"u(t) (in μm)\",label=\"My Thick Line!\") # legend=false" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Using the mutating `plot!` command we can add other pieces to our plot. 
For this ODE we know that the true solution is $u(t) = u_0 exp(at)$, so let's add some of the true solution to our plot:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label=\"True Solution!\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In the previous command I demonstrated `sol.t`, which grabs the array of time points that the solution was saved at:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.t" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can get the array of solution values using `sol.u`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.u" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "`sol.u[i]` is the value of the solution at time `sol.t[i]`. We can compute arrays of functions of the solution values using standard comprehensions, like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "[t+u for (u,t) in tuples(sol)]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "However, one interesting feature is that, by default, the solution is a continuous function. If we check the print out again:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "you see that it says that the solution has a order changing interpolation. The default algorithm automatically switches between methods in order to handle all types of problems. For non-stiff equations (like the one we are solving), it is a continuous function of 4th order accuracy. We can call the solution as a function of time `sol(t)`. 
For example, to get the value at `t=0.45`, we can use the command:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol(0.45)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Controlling the Solver\n\nDifferentialEquations.jl has a common set of solver controls among its algorithms which can be found [at the Common Solver Options](http://docs.juliadiffeq.org/latest/basics/common_solver_opts.html) page. We will detail some of the most widely used options.\n\nThe most useful options are the tolerances `abstol` and `reltol`. These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, `reltol` is the relative accuracy while `abstol` is the accuracy when `u` is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. Thus for the defaults `abstol=1e-6` and `reltol=1e-3`, you can expect a global accuracy of about 1-2 digits. If we want to get around 6 digits of accuracy, we can use the commands:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,abstol=1e-8,reltol=1e-8)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we can see no visible difference against the true solution:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol)\nplot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label=\"True Solution!\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that by decreasing the tolerance, the number of steps the solver had to take was `9` instead of the previous `5`. 
There is a trade off between accuracy and speed, and it is up to you to determine what is the right balance for your problem.\n\nAnother common option is to use `saveat` to make the solver save at specific time points. For example, if we want the solution at an even grid of `t=0.1k` for integers `k`, we would use the command:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,saveat=0.1)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that when `saveat` is used the continuous output variables are no longer saved and thus `sol(t)`, the interpolation, is only first order. We can save at an uneven grid of points by passing a collection of values to `saveat`. For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,saveat=[0.2,0.7,0.9])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we need to reduce the amount of saving, we can also turn off the continuous output directly via `dense=false`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,dense=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and to turn off all intermediate saving we can use `save_everystep=false`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we want to solve and only save the final value, we can even set `save_start=false`." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,save_everystep=false,save_start = false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that similarly on the other side there is `save_end=false`.\n\nMore advanced saving behaviors, such as saving functionals of the solution, are handled via the `SavingCallback` in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html#SavingCallback-1) which will be addressed later in the tutorial.\n\n#### Choosing Solver Algorithms\n\nThere is no best algorithm for numerically solving a differential equation. When you call `solve(prob)`, DifferentialEquations.jl makes a guess at a good algorithm for your problem, given the properties that you ask for (the tolerances, the saving information, etc.). However, in many cases you may want more direct control. A later notebook will help introduce the various *algorithms* in DifferentialEquations.jl, but for now let's introduce the *syntax*.\n\nThe most crucial determining factor in choosing a numerical method is the stiffness of the model. Stiffness is roughly characterized by a Jacobian `f` with large eigenvalues. That's quite mathematical, and we can think of it more intuitively: if you have big numbers in `f` (like parameters of order `1e5`), then it's probably stiff. Or, as the creator of the MATLAB ODE Suite, Lawrence Shampine, likes to define it, if the standard algorithms are slow, then it's stiff. We will go into more depth about diagnosing stiffness in a later tutorial, but for now note that if you believe your model may be stiff, you can hint this to the algorithm chooser via `alg_hints = [:stiff]`." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,alg_hints=[:stiff])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Stiff algorithms have to solve implicit equations and linear systems at each step so they should only be used when required.\n\nIf we want to choose an algorithm directly, you can pass the algorithm type after the problem as `solve(prob,alg)`. For example, let's solve this problem using the `Tsit5()` algorithm, and just for show let's change the relative tolerance to `1e-6` at the same time:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),reltol=1e-6)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Systems of ODEs: The Lorenz Equation\n\nNow let's move to a system of ODEs. The [Lorenz equation](https://en.wikipedia.org/wiki/Lorenz_system) is the famous \"butterfly attractor\" that spawned chaos theory. It is defined by the system of ODEs:\n\n$$\n\\begin{align}\n\\frac{dx}{dt} &= \\sigma (y - x)\\\\\n\\frac{dy}{dt} &= x (\\rho - z) -y\\\\\n\\frac{dz}{dt} &= xy - \\beta z\n\\end{align}\n$$\n\nTo define a system of differential equations in DifferentialEquations.jl, we define our `f` as a vector function with a vector initial condition. Thus, for the vector `u = [x,y,z]'`, we have the derivative function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz!(du,u,p,t)\n σ,ρ,β = p\n du[1] = σ*(u[2]-u[1])\n du[2] = u[1]*(ρ-u[3]) - u[2]\n du[3] = u[1]*u[2] - β*u[3]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice here we used the in-place format which writes the output to the preallocated vector `du`. For systems of equations the in-place format is faster. 
We use the initial condition $u_0 = [1.0,0.0,0.0]$ as follows:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.0,0.0,0.0]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Lastly, for this model we made use of the parameters `p`. We need to set this value in the `ODEProblem` as well. For our model we want to solve using the parameters $\\sigma = 10$, $\\rho = 28$, and $\\beta = 8/3$, and thus we build the parameter collection:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = (10,28,8/3) # we could also make this an array, or any other type!" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we generate the `ODEProblem` type. In this case, since we have parameters, we add the parameter values to the end of the constructor call. Let's solve this on a time span of `t=0` to `t=100`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "tspan = (0.0,100.0)\nprob = ODEProblem(lorenz!,u0,tspan,p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now, just as before, we solve the problem:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The same solution handling features apply to this case. Thus `sol.t` stores the time points and `sol.u` is an array storing the solution at the corresponding time points.\n\nHowever, there are a few extra features which are good to know when dealing with systems of equations. First of all, `sol` also acts like an array. `sol[i]` returns the solution at the `i`th time point." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.t[10],sol[10]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Additionally, the solution acts like a matrix where `sol[j,i]` is the value of the `j`th variable at time `i`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[2,10]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can get a real matrix by performing a conversion:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = Array(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is the same as sol, i.e. `sol[i,j] = A[i,j]`, but now it's a true matrix. Plotting will by default show the time series for each variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we instead want to plot values against each other, we can use the `vars` command. Let's plot variable `1` against variable `2` against variable `3`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is the classic Lorenz attractor plot, where the `x` axis is `u[1]`, the `y` axis is `u[2]`, and the `z` axis is `u[3]`. Note that the plot recipe by default uses the interpolation, but we can turn this off:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Yikes! 
This shows how calculating the continuous solution has saved a lot of computational effort by computing only a sparse solution and filling in the values! Note that in vars, `0=time`, and thus we can plot the time series of a single component like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(0,2))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### A DSL for Parameterized Functions\n\nIn many cases you may be defining a lot of functions with parameters. There exists the domain-specific language (DSL) defined by the `@ode_def` macro for helping with this common problem. For example, we can define the Lotka-Volterra equation:\n\n$$\n\\begin{align}\n\\frac{dx}{dt} &= ax - bxy\\\\\n\\frac{dy}{dt} &= -cy + dxy\n\\end{align}\n$$\n\nas follows:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lotka_volterra!(du,u,p,t)\n du[1] = p[1]*u[1] - p[2]*u[1]*u[2]\n du[2] = -p[3]*u[2] + p[4]*u[1]*u[2]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "However, that can be hard to follow since there's a lot of \"programming\" getting in the way. Instead, you can use the `@ode_def` macro from ParameterizedFunctions.jl:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using ParameterizedFunctions\nlv! 
= @ode_def LotkaVolterra begin\n dx = a*x - b*x*y\n dy = -c*y + d*x*y\nend a b c d" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can then use the result just like an ODE function from before:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.0,1.0]\np = (1.5,1.0,3.0,1.0)\ntspan = (0.0,10.0)\nprob = ODEProblem(lv!,u0,tspan,p)\nsol = solve(prob)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Not only is the DSL convenient syntax, but it does some magic behind the scenes. For example, further parts of the tutorial will describe how solvers for stiff differential equations have to make use of the Jacobian in calculations. Here, the DSL uses symbolic differentiation to automatically derive that function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "lv!.Jex" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The DSL can derive many other functions; this ability is used to speed up the solvers. An extension to DifferentialEquations.jl, [Latexify.jl](https://korsbo.github.io/Latexify.jl/latest/tutorials/parameterizedfunctions.html), allows you to extract these pieces as LaTeX expressions.\n\n## Internal Types\n\nThe last basic user-interface feature to explore is the choice of types. DifferentialEquations.jl respects your input types to determine the internal types that are used. Thus since in the previous cases, when we used `Float64` values for the initial condition, this meant that the internal values would be solved using `Float64`. We made sure that time was specified via `Float64` values, meaning that time steps would utilize 64-bit floats as well. But, by simply changing these types we can change what is used internally.\n\nAs a quick example, let's say we want to solve an ODE defined by a matrix. 
To do this, we can simply use a matrix as input." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = [1. 0 0 -5\n 4 -2 4 -3\n -4 0 0 1\n 5 -2 2 3]\nu0 = rand(4,2)\ntspan = (0.0,1.0)\nf(u,p,t) = A*u\nprob = ODEProblem(f,u0,tspan)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is no real difference from what we did before, but now in this case `u0` is a `4x2` matrix. Because of that, the solution at each time point is matrix:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[3]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In DifferentialEquations.jl, you can use any type that defines `+`, `-`, `*`, `/`, and has an appropriate `norm`. For example, if we want arbitrary precision floating point numbers, we can change the input to be a matrix of `BigFloat`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "big_u0 = big.(u0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and we can solve the `ODEProblem` with arbitrary precision numbers by using that initial condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,big_u0,tspan)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[1,3]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To really make use of this, we would want to change `abstol` and `reltol` to be small! Notice that the type for \"time\" is different than the type for the dependent variables, and this can be used to optimize the algorithm via keeping multiple precisions. 
We can convert time to be arbitrary precision as well by defining our time span with `BigFloat` variables:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,big_u0,big.(tspan))\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's end by showing a more complicated use of types. For small arrays, it's usually faster to do operations on static arrays via the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). The syntax is similar to that of normal arrays, but for these special arrays we utilize the `@SMatrix` macro to indicate we want to create a static array." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using StaticArrays\nA = @SMatrix [ 1.0 0.0 0.0 -5.0\n 4.0 -2.0 4.0 -3.0\n -4.0 0.0 0.0 1.0\n 5.0 -2.0 2.0 3.0]\nu0 = @SMatrix rand(4,2)\ntspan = (0.0,1.0)\nf(u,p,t) = A*u\nprob = ODEProblem(f,u0,tspan)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[3]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Conclusion\n\nThese are the basic controls in DifferentialEquations.jl. All equations are defined via a problem type, and the `solve` command is used with an algorithm choice (or the default) to get a solution. Every solution acts the same, like an array `sol[i]` with `sol.t[i]`, and also like a continuous function `sol(t)` with a nice plot command `plot(sol)`. The Common Solver Options can be used to control the solver for any equation type. Lastly, the types used in the numerical solving are determined by the input types, and this can be used to solve with arbitrary precision and add additional optimizations (this can be used to solve via GPUs for example!). 
While this was shown on ODEs, these techniques generalize to other types of equations as well." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/optimizing_diffeq_code.ipynb b/notebook/introduction/optimizing_diffeq_code.ipynb deleted file mode 100644 index 87bd790d..00000000 --- a/notebook/introduction/optimizing_diffeq_code.ipynb +++ /dev/null @@ -1,560 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "In this notebook we will walk through some of the main tools for optimizing your code in order to efficiently solve DifferentialEquations.jl. User-side optimizations are important because, for sufficiently difficult problems, most of the time will be spent inside of your `f` function, the function you are trying to solve. \"Efficient\" integrators are those that reduce the required number of `f` calls to hit the error tolerance. The main ideas for optimizing your DiffEq code, or any Julia function, are the following:\n\n- Make it non-allocating\n- Use StaticArrays for small arrays\n- Use broadcast fusion\n- Make it type-stable\n- Reduce redundant calculations\n- Make use of BLAS calls\n- Optimize algorithm choice\n\nWe'll discuss these strategies in the context of small and large systems. Let's start with small systems.\n\n## Optimizing Small Systems (<100 DEs)\n\nLet's take the classic Lorenz system from before. 
Let's start by naively writing the system in its out-of-place form:\n# Optimizing DiffEq Code\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz(u,p,t)\n dx = 10.0*(u[2]-u[1])\n dy = u[1]*(28.0-u[3]) - u[2]\n dz = u[1]*u[2] - (8/3)*u[3]\n [dx,dy,dz]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here, `lorenz` returns an object, `[dx,dy,dz]`, which is created within the body of `lorenz`.\n\nThis is a common code pattern from high-level languages like MATLAB, SciPy, or R's deSolve. However, the issue with this form is that it allocates a vector, `[dx,dy,dz]`, at each step. Let's benchmark the solution process with this choice of function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, BenchmarkTools\nu0 = [1.0;0.0;0.0]\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz,u0,tspan)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The BenchmarkTools package's `@benchmark` runs the code multiple times to get an accurate measurement. The minimum time is the time it takes when your OS and other background processes aren't getting in the way. Notice that in this case it takes about 5ms to solve and allocates around 11.11 MiB. However, if we were to use this inside of a real user code we'd see a lot of time spent doing garbage collection (GC) to clean up all of the arrays we made. Even if we turn off saving we have these allocations." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The problem of course is that arrays are created every time our derivative function is called. 
This function is called multiple times per step and is thus the main source of memory usage. To fix this, we can use the in-place form to ***make our code non-allocating***:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz!(du,u,p,t)\n du[1] = 10.0*(u[2]-u[1])\n du[2] = u[1]*(28.0-u[3]) - u[2]\n du[3] = u[1]*u[2] - (8/3)*u[3]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here, instead of creating an array each time, we utilized the cache array `du`. When the inplace form is used, DifferentialEquations.jl takes a different internal route that minimizes the internal allocations as well. When we benchmark this function, we will see quite a difference." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.0;0.0;0.0]\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz!,u0,tspan)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is a 4x time difference just from that change! Notice there are still some allocations and this is due to the construction of the integration cache. But this doesn't scale with the problem size:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "tspan = (0.0,500.0) # 5x longer than before\nprob = ODEProblem(lorenz!,u0,tspan)\n@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "since that's all just setup allocations.\n\n#### But if the system is small we can optimize even more.\n\nAllocations are only expensive if they are \"heap allocations\". 
For a more in-depth definition of heap allocations, [there are a lot of sources online](http://net-informations.com/faq/net/stack-heap.htm). But a good working definition is that heap allocations are variable-sized slabs of memory which have to be pointed to, and this pointer indirection costs time. Additionally, the heap has to be managed and the garbage controllers has to actively keep track of what's on the heap.\n\nHowever, there's an alternative to heap allocations, known as stack allocations. The stack is statically-sized (known at compile time) and thus its accesses are quick. Additionally, the exact block of memory is known in advance by the compiler, and thus re-using the memory is cheap. This means that allocating on the stack has essentially no cost!\n\nArrays have to be heap allocated because their size (and thus the amount of memory they take up) is determined at runtime. But there are structures in Julia which are stack-allocated. `struct`s for example are stack-allocated \"value-type\"s. `Tuple`s are a stack-allocated collection. The most useful data structure for DiffEq though is the `StaticArray` from the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). These arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using StaticArrays\nA = @SVector [2.0,3.0,5.0]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that the `3` after `SVector` gives the size of the `SVector`. It cannot be changed. Additionally, `SVector`s are immutable, so we have to create a new `SVector` to change values. But remember, we don't have to worry about allocations because this data structure is stack-allocated. 
`SArray`s have a lot of extra optimizations as well: they have fast matrix multiplication, fast QR factorizations, etc. which directly make use of the information about the size of the array. Thus, when possible they should be used.\n\nUnfortunately static arrays can only be used for sufficiently small arrays. After a certain size, they are forced to heap allocate after some instructions and their compile time balloons. Thus static arrays shouldn't be used if your system has more than 100 variables. Additionally, only the native Julia algorithms can fully utilize static arrays.\n\nLet's ***optimize `lorenz` using static arrays***. Note that in this case, we want to use the out-of-place allocating form, but this time we want to output a static array:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz_static(u,p,t)\n dx = 10.0*(u[2]-u[1])\n dy = u[1]*(28.0-u[3]) - u[2]\n dz = u[1]*u[2] - (8/3)*u[3]\n @SVector [dx,dy,dz]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To make the solver internally use static arrays, we simply give it a static array as the initial condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = @SVector [1.0,0.0,0.0]\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz_static,u0,tspan)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "And that's pretty much all there is to it. With static arrays you don't have to worry about allocating, so use operations like `*` and don't worry about fusing operations (discussed in the next section). 
Do \"the vectorized code\" of R/MATLAB/Python and your code in this case will be fast, or directly use the numbers/values.\n\n#### Exercise 1\n\nImplement the out-of-place array, in-place array, and out-of-place static array forms for the [Henon-Heiles System](https://en.wikipedia.org/wiki/H%C3%A9non%E2%80%93Heiles_system) and time the results.\n\n## Optimizing Large Systems\n\n### Interlude: Managing Allocations with Broadcast Fusion\n\nWhen your system is sufficiently large, or you have to make use of a non-native Julia algorithm, you have to make use of `Array`s. In order to use arrays in the most efficient manner, you need to be careful about temporary allocations. Vectorized calculations naturally have plenty of temporary array allocations. This is because a vectorized calculation outputs a vector. Thus:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000)\ntest(A,B,C) = A + B + C\n@benchmark test(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That expression `A + B + C` creates 2 arrays. It first creates one for the output of `A + B`, then uses that result array to `+ C` to get the final result. 2 arrays! We don't want that! The first thing to do to fix this is to use broadcast fusion. [Broadcast fusion](https://julialang.org/blog/2017/01/moredots) puts expressions together. For example, instead of doing the `+` operations separately, if we were to add them all at the same time, then we would only have a single array that's created. 
For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C)\n@benchmark test2(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Puts the whole expression into a single function call, and thus only one array is required to store output. This is the same as writing the loop:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function test3(A,B,C)\n D = similar(A)\n @inbounds for i in eachindex(A)\n D[i] = A[i] + B[i] + C[i]\n end\n D\nend\n@benchmark test3(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "However, Julia's broadcast is syntactic sugar for this. If multiple expressions have a `.`, then it will put those vectorized operations together. Thus:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test4(A,B,C) = A .+ B .+ C\n@benchmark test4(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "is a version with only 1 array created (the output). Note that `.`s can be used with function calls as well:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sin.(A) .+ sin.(B)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Also, the `@.` macro applys a dot to every operator:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test5(A,B,C) = @. A + B + C #only one array allocated\n@benchmark test5(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Using these tools we can get rid of our intermediate array allocations for many vectorized function calls. But we are still allocating the output array. 
To get rid of that allocation, we can instead use mutation. Mutating broadcast is done via `.=`. For example, if we pre-allocate the output:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "D = zeros(1000,1000);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Then we can keep re-using this cache for subsequent calculations. The mutating broadcasting form is:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated\n@benchmark test6!(D,A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we use `@.` before the `=`, then it will turn it into `.=`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test7!(D,A,B,C) = @. D = A + B + C #only one array allocated\n@benchmark test7!(D,A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that in this case, there is no \"output\", and instead the values inside of `D` are what are changed (like with the DiffEq inplace function). Many Julia functions have a mutating form which is denoted with a `!`. For example, the mutating form of the `map` is `map!`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C)\n@benchmark test8!(D,A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Some operations require using an alternate mutating form in order to be fast. 
For example, matrix multiplication via `*` allocates a temporary:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark A*B" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Instead, we can use the mutating form `mul!` into a cache array to avoid allocating the output:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using LinearAlgebra\n@benchmark mul!(D,A,B) # same as D = A * B" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "For repeated calculations this reduced allocation can stop GC cycles and thus lead to more efficient code. Additionally, ***we can fuse together higher level linear algebra operations using BLAS***. The package [SugarBLAS.jl](https://github.com/lopezm94/SugarBLAS.jl) makes it easy to write higher level operations like `alpha*B*A + beta*C` as mutating BLAS calls.\n\n### Example Optimization: Gierer-Meinhardt Reaction-Diffusion PDE Discretization\n\nLet's optimize the solution of a Reaction-Diffusion PDE's discretization. In its discretized form, this is the ODE:\n\n$$\n\\begin{align}\ndu &= D_1 (A_y u + u A_x) + \\frac{au^2}{v} + \\bar{u} - \\alpha u\\\\\ndv &= D_2 (A_y v + v A_x) + a u^2 + \\beta v\n\\end{align}\n$$\n\nwhere $u$, $v$, and $A$ are matrices. Here, we will use the simplified version where $A$ is the tridiagonal stencil $[1,-2,1]$, i.e. it's the 2D discretization of the LaPlacian. 
The native code would be something along the lines of:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Generate the constants\np = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2\nN = 100\nAx = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1]))\nAy = copy(Ax)\nAx[2,1] = 2.0\nAx[end-1,end] = 2.0\nAy[1,2] = 2.0\nAy[end,end-1] = 2.0\n\nfunction basic_version!(dr,r,p,t)\n a,α,ubar,β,D1,D2 = p\n u = r[:,:,1]\n v = r[:,:,2]\n Du = D1*(Ay*u + u*Ax)\n Dv = D2*(Ay*v + v*Ax)\n dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u\n dr[:,:,2] = Dv .+ a.*u.*u .- β*v\nend\n\na,α,ubar,β,D1,D2 = p\nuss = (ubar+β)/α\nvss = (a/β)*uss^2\nr0 = zeros(100,100,2)\nr0[:,:,1] .= uss.+0.1.*rand.()\nr0[:,:,2] .= vss\n\nprob = ODEProblem(basic_version!,r0,(0.0,0.1),p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In this version we have encoded our initial condition to be a 3-dimensional array, with `u[:,:,1]` being the `A` part and `u[:,:,2]` being the `B` part." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "While this version isn't very efficient,\n\n#### We recommend writing the \"high-level\" code first, and iteratively optimizing it!\n\nThe first thing that we can do is get rid of the slicing allocations. The operation `r[:,:,1]` creates a temporary array instead of a \"view\", i.e. a pointer to the already existing memory. To make it a view, add `@view`. 
Note that we have to be careful with views because they point to the same memory, and thus changing a view changes the original values:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = rand(4)\n@show A\nB = @view A[1:3]\nB[2] = 2\n@show A" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that changing `B` changed `A`. This is something to be careful of, but at the same time we want to use this since we want to modify the output `dr`. Additionally, the last statement is a purely element-wise operation, and thus we can make use of broadcast fusion there. Let's rewrite `basic_version!` to ***avoid slicing allocations*** and to ***use broadcast fusion***:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function gm2!(dr,r,p,t)\n a,α,ubar,β,D1,D2 = p\n u = @view r[:,:,1]\n v = @view r[:,:,2]\n du = @view dr[:,:,1]\n dv = @view dr[:,:,2]\n Du = D1*(Ay*u + u*Ax)\n Dv = D2*(Ay*v + v*Ax)\n @. du = Du + a.*u.*u./v + ubar - α*u\n @. dv = Dv + a.*u.*u - β*v\nend\nprob = ODEProblem(gm2!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now, most of the allocations are taking place in `Du = D1*(Ay*u + u*Ax)` since those operations are vectorized and not mutating. We should instead replace the matrix multiplications with `mul!`. When doing so, we will need to have cache variables to write into. This looks like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "Ayu = zeros(N,N)\nuAx = zeros(N,N)\nDu = zeros(N,N)\nAyv = zeros(N,N)\nvAx = zeros(N,N)\nDv = zeros(N,N)\nfunction gm3!(dr,r,p,t)\n a,α,ubar,β,D1,D2 = p\n u = @view r[:,:,1]\n v = @view r[:,:,2]\n du = @view dr[:,:,1]\n dv = @view dr[:,:,2]\n mul!(Ayu,Ay,u)\n mul!(uAx,u,Ax)\n mul!(Ayv,Ay,v)\n mul!(vAx,v,Ax)\n @. Du = D1*(Ayu + uAx)\n @. 
Dv = D2*(Ayv + vAx)\n @. du = Du + a*u*u./v + ubar - α*u\n @. dv = Dv + a*u*u - β*v\nend\nprob = ODEProblem(gm3!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "But our temporary variables are global variables. We need to either declare the caches as `const` or localize them. We can localize them by adding them to the parameters, `p`. It's easier for the compiler to reason about local variables than global variables. ***Localizing variables helps to ensure type stability***." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2\nfunction gm4!(dr,r,p,t)\n a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p\n u = @view r[:,:,1]\n v = @view r[:,:,2]\n du = @view dr[:,:,1]\n dv = @view dr[:,:,2]\n mul!(Ayu,Ay,u)\n mul!(uAx,u,Ax)\n mul!(Ayv,Ay,v)\n mul!(vAx,v,Ax)\n @. Du = D1*(Ayu + uAx)\n @. Dv = D2*(Ayv + vAx)\n @. du = Du + a*u*u./v + ubar - α*u\n @. dv = Dv + a*u*u - β*v\nend\nprob = ODEProblem(gm4!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We could then use the BLAS `gemmv` to optimize the matrix multiplications some more, but instead let's devectorize the stencil." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = (1.0,1.0,1.0,10.0,0.001,100.0,N)\nfunction fast_gm!(du,u,p,t)\n a,α,ubar,β,D1,D2,N = p\n\n @inbounds for j in 2:N-1, i in 2:N-1\n du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n\n @inbounds for j in 2:N-1, i in 2:N-1\n du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n\n @inbounds for j in 2:N-1\n i = 1\n du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for j in 2:N-1\n i = 1\n du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n @inbounds for j in 2:N-1\n i = N\n du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for j in 2:N-1\n i = N\n du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n\n @inbounds for i in 2:N-1\n j = 1\n du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for i in 2:N-1\n j = 1\n du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n @inbounds for i in 2:N-1\n j = N\n du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for i in 2:N-1\n j = N\n du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n\n @inbounds begin\n i = 1; j = 1\n du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n\n i = 1; j = N\n du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 
4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n\n i = N; j = 1\n du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n\n i = N; j = N\n du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\nend\nprob = ODEProblem(fast_gm!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Lastly, we can do other things like multithread the main loops, but these optimizations get the last 2x-3x out. The main optimizations which apply everywhere are the ones we just performed (though the last one only works if your matrix is a stencil. This is known as a matrix-free implementation of the PDE discretization).\n\nThis gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code!\n\nThe last thing to do is then ***optimize our algorithm choice***. We have been using `Tsit5()` as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use `CVODE_BDF()`. However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. 
`CVODE_BDF` allows us to use a sparse Newton-Krylov solver by setting `linear_solver = :GMRES` (see [the solver documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1), and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Sundials\n@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p)\n# Will go out of memory if we don't turn off `save_everystep`!\n@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's check the allocation growth." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p)\n@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we've elimated almost all allocations, allowing the code to grow without hitting garbage collection and slowing down.\n\nWhy is `CVODE_BDF` doing well? 
What's happening is that, because the problem is stiff, the number of steps required by the explicit Runge-Kutta method grows rapidly, whereas `CVODE_BDF` is taking large steps. Additionally, the `GMRES` linear solver form is quite an efficient way to solve the implicit system in this case. This is problem-dependent, and in many cases using a Krylov method effectively requires a preconditioner, so you need to play around with testing other algorithms and linear solvers to find out what works best with your problem.\n\n## Conclusion\n\nJulia gives you the tools to optimize the solver \"all the way\", but you need to make use of it. The main thing to avoid is temporary allocations. For small systems, this is effectively done via static arrays. For large systems, this is done via in-place operations and cache arrays. Either way, the resulting solution can be immensely sped up over vectorized formulations by using these principles." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/classical_physics.ipynb b/notebook/models/classical_physics.ipynb deleted file mode 100644 index 10a78be8..00000000 --- a/notebook/models/classical_physics.ipynb +++ /dev/null @@ -1,240 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "If you're getting some cold feet to jump in to DiffEq land, here are some handcrafted differential equations mini problems to hold your hand along the beginning of your journey.\n\n## Radioactive Decay of Carbon-14\n\n#### First order linear ODE\n\n$$f(t,u) = \\frac{du}{dt}$$\n\nThe Radioactive decay problem is the first order linear ODE problem of an exponential with a negative coefficient, which represents the half-life of the process in 
question. Should the coefficient be positive, this would represent a population growth equation.\n# Classical Physics Models\n### Yingbo Ma, Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using OrdinaryDiffEq, Plots\ngr()\n\n#Half-life of Carbon-14 is 5,730 years.\nC₁ = 5.730\n\n#Setup\nu₀ = 1.0\ntspan = (0.0, 1.0)\n\n#Define the problem\nradioactivedecay(u,p,t) = -C₁*u\n\n#Pass to solver\nprob = ODEProblem(radioactivedecay,u₀,tspan)\nsol = solve(prob,Tsit5())\n\n#Plot\nplot(sol,linewidth=2,title =\"Carbon-14 half-life\", xaxis = \"Time in thousands of years\", yaxis = \"Percentage left\", label = \"Numerical Solution\")\nplot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label=\"Analytical Solution\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Simple Pendulum\n\n#### Second Order Linear ODE\n\nWe will start by solving the pendulum problem. In the physics class, we often solve this problem by small angle approximation, i.e. $ sin(\\theta) \\approx \\theta$, because otherwise, we get an elliptic integral which doesn't have an analytic solution. The linearized form is\n\n$$\\ddot{\\theta} + \\frac{g}{L}{\\theta} = 0$$\n\nBut we have numerical ODE solvers! 
Why not solve the *real* pendulum?\n\n$$\\ddot{\\theta} + \\frac{g}{L}{\\sin(\\theta)} = 0$$" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Simple Pendulum Problem\nusing OrdinaryDiffEq, Plots\n\n#Constants\nconst g = 9.81\nL = 1.0\n\n#Initial Conditions\nu₀ = [0,π/2]\ntspan = (0.0,6.3)\n\n#Define the problem\nfunction simplependulum(du,u,p,t)\n θ = u[1]\n dθ = u[2]\n du[1] = dθ\n du[2] = -(g/L)*sin(θ)\nend\n\n#Pass to solvers\nprob = ODEProblem(simplependulum,u₀, tspan)\nsol = solve(prob,Tsit5())\n\n#Plot\nplot(sol,linewidth=2,title =\"Simple Pendulum Problem\", xaxis = \"Time\", yaxis = \"Height\", label = [\"Theta\",\"dTheta\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "So now we know that behaviour of the position versus time. However, it will be useful to us to look at the phase space of the pendulum, i.e., and representation of all possible states of the system in question (the pendulum) by looking at its velocity and position. Phase space analysis is ubiquitous in the analysis of dynamical systems, and thus we will provide a few facilities for it." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = plot(sol,vars = (1,2), xlims = (-9,9), title = \"Phase Space Plot\", xaxis = \"Velocity\", yaxis = \"Position\", leg=false)\nfunction phase_plot(prob, u0, p, tspan=2pi)\n _prob = ODEProblem(prob.f,u0,(0.0,tspan))\n sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy\n plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing)\nend\nfor i in -4pi:pi/2:4π\n for j in -4pi:pi/2:4π\n phase_plot(prob, [j,i], p)\n end\nend\nplot(p,xlims = (-9,9))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Simple Harmonic Oscillator\n\n### Double Pendulum" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Double Pendulum Problem\nusing OrdinaryDiffEq, Plots\n\n#Constants and setup\nconst m₁, m₂, L₁, L₂ = 1, 2, 1, 2\ninitial = [0, π/3, 0, 3pi/5]\ntspan = (0.,50.)\n\n#Convenience function for transforming from polar to Cartesian coordinates\nfunction polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4))\n u = sol.t[1]:dt:sol.t[end]\n\n p1 = l1*map(x->x[vars[1]], sol.(u))\n p2 = l2*map(y->y[vars[2]], sol.(u))\n\n x1 = l1*sin.(p1)\n y1 = l1*-cos.(p1)\n (u, (x1 + l2*sin.(p2),\n y1 - l2*cos.(p2)))\nend\n\n#Define the Problem\nfunction double_pendulum(xdot,x,p,t)\n xdot[1]=x[2]\n xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2)))\n xdot[3]=x[4]\n xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2))\nend\n\n#Pass to Solvers\ndouble_pendulum_problem = ODEProblem(double_pendulum, initial, tspan)\nsol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Obtain coordinates in Cartesian Geometry\nts, ps = 
polar2cart(sol, l1=L₁, l2=L₂, dt=0.01)\nplot(ps...)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Poincaré section\n\nThe Poincaré section is a contour plot of a higher-dimensional phase space diagram. It helps to understand the dynamic interactions and is wonderfully pretty.\n\nThe following equation came from [StackOverflow question](https://mathematica.stackexchange.com/questions/40122/help-to-plot-poincar%C3%A9-section-for-double-pendulum)\n\n$$\\frac{d}{dt}\n \\begin{pmatrix}\n \\alpha \\\\ l_\\alpha \\\\ \\beta \\\\ l_\\beta\n \\end{pmatrix}=\n \\begin{pmatrix}\n 2\\frac{l_\\alpha - (1+\\cos\\beta)l_\\beta}{3-\\cos 2\\beta} \\\\\n -2\\sin\\alpha - \\sin(\\alpha + \\beta) \\\\\n 2\\frac{-(1+\\cos\\beta)l_\\alpha + (3+2\\cos\\beta)l_\\beta}{3-\\cos2\\beta}\\\\\n -\\sin(\\alpha+\\beta) - 2\\sin(\\beta)\\frac{(l_\\alpha-l_\\beta)l_\\beta}{3-\\cos2\\beta} + 2\\sin(2\\beta)\\frac{l_\\alpha^2-2(1+\\cos\\beta)l_\\alpha l_\\beta + (3+2\\cos\\beta)l_\\beta^2}{(3-\\cos2\\beta)^2}\n \\end{pmatrix}$$\n\nThe Poincaré section here is the collection of $(β,l_β)$ when $α=0$ and $\\frac{dα}{dt}>0$.\n\n#### Hamiltonian of a double pendulum\nNow we will plot the Hamiltonian of a double pendulum" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Constants and setup\nusing OrdinaryDiffEq\ninitial2 = [0.01, 0.005, 0.01, 0.01]\ntspan2 = (0.,200.)\n\n#Define the problem\nfunction double_pendulum_hamiltonian(udot,u,p,t)\n α = u[1]\n lα = u[2]\n β = u[3]\n lβ = u[4]\n udot .=\n [2(lα-(1+cos(β))lβ)/(3-cos(2β)),\n -2sin(α) - sin(α+β),\n 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)),\n -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)]\nend\n\n# Construct a ContiunousCallback\ncondition(u,t,integrator) = u[1]\naffect!(integrator) = nothing\ncb = ContinuousCallback(condition,affect!,nothing,\n save_positions = 
(true,false))\n\n# Construct Problem\npoincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2)\nsol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)\n\nfunction poincare_map(prob, u₀, p; callback=cb)\n _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan)\n sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)\n scatter!(p, sol, vars=(3,4), markersize = 2)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03))\nfor i in -0.01:0.00125:0.01\n poincare_map(poincare, i, p)\nend\nplot(p,ylims=(-0.01,0.03))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Hénon-Heiles System\n\nThe Hénon-Heiles potential occurs when non-linear motion of a star around a galactic center with the motion restricted to a plane.\n\n$$\n\\begin{align}\n\\frac{d^2x}{dt^2}&=-\\frac{\\partial V}{\\partial x}\\\\\n\\frac{d^2y}{dt^2}&=-\\frac{\\partial V}{\\partial y}\n\\end{align}\n$$\n\nwhere\n\n$$V(x,y)={\\frac {1}{2}}(x^{2}+y^{2})+\\lambda \\left(x^{2}y-{\\frac {y^{3}}{3}}\\right).$$\n\nWe pick $\\lambda=1$ in this case, so\n\n$$V(x,y) = \\frac{1}{2}(x^2+y^2+2x^2y-\\frac{2}{3}y^3).$$\n\nThen the total energy of the system can be expressed by\n\n$$E = T+V = V(x,y)+\\frac{1}{2}(\\dot{x}^2+\\dot{y}^2).$$\n\nThe total energy should conserve as this system evolves." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using OrdinaryDiffEq, Plots\n\n#Setup\ninitial = [0.,0.1,0.5,0]\ntspan = (0,100.)\n\n#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will\n#the total energy of the system.\nV(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3)\nE(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2);\n\n#Define the function\nfunction Hénon_Heiles(du,u,p,t)\n x = u[1]\n y = u[2]\n dx = u[3]\n dy = u[4]\n du[1] = dx\n du[2] = dy\n du[3] = -x - 2x*y\n du[4] = y^2 - y -x^2\nend\n\n#Pass to solvers\nprob = ODEProblem(Hénon_Heiles, initial, tspan)\nsol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Plot the orbit\nplot(sol, vars=(1,2), title = \"The orbit of the Hénon-Heiles system\", xaxis = \"x\", yaxis = \"y\", leg=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Optional Sanity check - what do you think this returns and why?\n@show sol.retcode\n\n#Plot -\nplot(sol, vars=(1,3), title = \"Phase space for the Hénon-Heiles system\", xaxis = \"Position\", yaxis = \"Velocity\")\nplot!(sol, vars=(2,4), leg = false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector\n#pass it to the plotter a bit more conveniently\nenergy = map(x->E(x...), sol.u)\n\n#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.\n@show ΔE = energy[1]-energy[end]\n\n#Plot\nplot(sol.t, energy, title = \"Change in Energy over Time\", xaxis = \"Time in iterations\", yaxis = \"Change in Energy\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - 
"### Symplectic Integration\n\nTo prevent energy drift, we can instead use a symplectic integrator. We can directly define and solve the `SecondOrderODEProblem`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function HH_acceleration!(dv,v,u,p,t)\n x,y = u\n dx,dy = dv\n dv[1] = -x - 2x*y\n dv[2] = y^2 - y -x^2\nend\ninitial_positions = [0.0,0.1]\ninitial_velocities = [0.5,0.0]\nprob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan)\nsol2 = solve(prob, KahanLi8(), dt=1/10);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we get the same results:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Plot the orbit\nplot(sol2, vars=(3,4), title = \"The orbit of the Hénon-Heiles system\", xaxis = \"x\", yaxis = \"y\", leg=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol2, vars=(3,1), title = \"Phase space for the Hénon-Heiles system\", xaxis = \"Position\", yaxis = \"Velocity\")\nplot!(sol2, vars=(4,2), leg = false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "but now the energy change is essentially zero:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u)\n#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.\n@show ΔE = energy[1]-energy[end]\n\n#Plot\nplot(sol2.t, energy, title = \"Change in Energy over Time\", xaxis = \"Time in iterations\", yaxis = \"Change in Energy\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "It's so close to zero it breaks GR! And let's try to use a Runge-Kutta-Nyström solver to solve this. 
Note that Runge-Kutta-Nyström isn't symplectic." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol3 = solve(prob, DPRKN6());\nenergy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u)\n@show ΔE = energy[1]-energy[end]\ngr()\nplot(sol3.t, energy, title = \"Change in Energy over Time\", xaxis = \"Time in iterations\", yaxis = \"Change in Energy\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that we are using the `DPRKN6` sovler at `reltol=1e-3` (the default), yet it has a smaller energy variation than `Vern9` at `abs_tol=1e-16, rel_tol=1e-16`. Therefore, using specialized solvers to solve its particular problem is very efficient." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/conditional_dosing.ipynb b/notebook/models/conditional_dosing.ipynb deleted file mode 100644 index a5f40f59..00000000 --- a/notebook/models/conditional_dosing.ipynb +++ /dev/null @@ -1,140 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "In this example we will show how to model a conditional dosing using the `DiscreteCallbacks`. The problem is as follows. The patient has a drug `A(t)` in their system. The concentration of the drug is given as `C(t)=A(t)/V` for some volume constant `V`. At `t=4`, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below `4`, then they will receive a new dose.\n\nFor our model, we will use the simple decay equation. 
We will write this in the in-place form to make it easy to extend to more complicated examples:\n# Conditional Dosing Pharmacometric Example\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nfunction f(du,u,p,t)\n du[1] = -u[1]\nend\nu0 = [10.0]\nconst V = 1\nprob = ODEProblem(f,u0,(0.0,10.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's see what the solution looks like without any events." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We see that at time `t=4`, the patient should receive a dose. Let's code up that event. We need to check at `t=4` if the concentration `u[1]/4` is `<4`, and if so, add `10` to `u[1]`. We do this with the following:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "condition(u,t,integrator) = t==4 && u[1]/V<4\naffect!(integrator) = integrator.u[1] += 10\ncb = DiscreteCallback(condition,affect!)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we will give this callback to the solver, and tell it to stop at `t=4` so that way the condition can be checked:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's show that it actually added 10 instead of setting the value to 10. 
We could have set the value using `affect!(integrator) = integrator.u[1] = 10`" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(sol(4.00000))\nprintln(sol(4.000000000001))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's model a patient whose decay rate for the drug is lower:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function f(du,u,p,t)\n du[1] = -u[1]/6\nend\nu0 = [10.0]\nconst V = 1\nprob = ODEProblem(f,u0,(0.0,10.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Under the same criteria, with the same event, this patient will not receive a second dose:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/diffeqbio_II_networkproperties.ipynb b/notebook/models/diffeqbio_II_networkproperties.ipynb deleted file mode 100644 index 3f5646dc..00000000 --- a/notebook/models/diffeqbio_II_networkproperties.ipynb +++ /dev/null @@ -1,6092 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# DiffEqBiological Tutorial II: Network Properties API\n", - "### Samuel Isaacson\n", - "\n", - "The [DiffEqBiological\n", - 
"API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides a\n", - "collection of functions for easily accessing network properties, and for\n", - "incrementally building and extending a network. In this tutorial we'll go\n", - "through the API, and then illustrate how to programmatically construct a\n", - "network.\n", - "\n", - "We'll illustrate the API using a toggle-switch like network that contains a\n", - "variety of different reaction types:" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [], - "source": [ - "using DifferentialEquations, DiffEqBiological, Latexify, Plots\n", - "fmt = :svg\n", - "pyplot(fmt=fmt)\n", - "rn = @reaction_network begin\n", - " hillr(D₂,α,K,n), ∅ --> m₁\n", - " hillr(D₁,α,K,n), ∅ --> m₂\n", - " (δ,γ), m₁ ↔ ∅\n", - " (δ,γ), m₂ ↔ ∅\n", - " β, m₁ --> m₁ + P₁\n", - " β, m₂ --> m₂ + P₂\n", - " μ, P₁ --> ∅\n", - " μ, P₂ --> ∅\n", - " (k₊,k₋), 2P₁ ↔ D₁ \n", - " (k₊,k₋), 2P₂ ↔ D₂\n", - " (k₊,k₋), P₁+P₂ ↔ T\n", - "end α K n δ γ β μ k₊ k₋;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This corresponds to the chemical reaction network given by" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{align}\n", - "\\require{mhchem}\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\\\\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\\\\n", - "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{2} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", - "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", - "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ 2 \\cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\\\\n", - "\\ce{ 2 \\cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\\\\n", - "\\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T}\n", - 
"\\end{align}\n" - ], - "text/plain": [ - "L\"\\begin{align}\n", - "\\require{mhchem}\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\\\\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\\\\n", - "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{2} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", - "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", - "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ 2 \\cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\\\\n", - "\\ce{ 2 \\cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\\\\n", - "\\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T}\n", - "\\end{align}\n", - "\"" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(rn; env=:chemical)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Network Properties\n", - "[Basic\n", - "properties](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Basic-properties-1)\n", - "of the generated network include the `speciesmap` and `paramsmap` functions we\n", - "examined in the last tutorial, along with the corresponding `species` and\n", - "`params` functions:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "7-element Array{Symbol,1}:\n", - " :m₁\n", - " :m₂\n", - " :P₁\n", - " :P₂\n", - " :D₁\n", - " :D₂\n", - " :T " - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "species(rn)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "9-element Array{Symbol,1}:\n", - " :α \n", - " :K \n", - " :n \n", - " :δ \n", - " :γ \n", - " :β \n", - " :μ \n", - " :k₊\n", - " :k₋" - ] - }, - "execution_count": 4, - "metadata": 
{}, - "output_type": "execute_result" - } - ], - "source": [ - "params(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The numbers of species, parameters and reactions can be accessed using\n", - "`numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`.\n", - "\n", - "A number of functions are available to access [properties of\n", - "reactions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Properties-1)\n", - "within the generated network, including `substrates`, `products`, `dependents`,\n", - "`ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`,\n", - "`productsymstoich`, and `netstoich`. Each of these functions takes two\n", - "arguments, the reaction network `rn` and the index of the reaction to query\n", - "information about. For example, to find the substrate symbols and their\n", - "corresponding stoichiometries for the 11th reaction, `2P₁ --> D₁`, we would use" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "1-element Array{DiffEqBiological.ReactantStruct,1}:\n", - " DiffEqBiological.ReactantStruct(:P₁, 2)" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "substratesymstoich(rn, 11)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Broadcasting works on all these functions, allowing the construction of a vector\n", - "holding the queried information across all reactions, i.e." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{DiffEqBiological.ReactantStruct,1},1}:\n", - " [] \n", - " [] \n", - " [ReactantStruct(:m₁, 1)] \n", - " [] \n", - " [ReactantStruct(:m₂, 1)] \n", - " [] \n", - " [ReactantStruct(:m₁, 1)] \n", - " [ReactantStruct(:m₂, 1)] \n", - " [ReactantStruct(:P₁, 1)] \n", - " [ReactantStruct(:P₂, 1)] \n", - " [ReactantStruct(:P₁, 2)] \n", - " [ReactantStruct(:D₁, 1)] \n", - " [ReactantStruct(:P₂, 2)] \n", - " [ReactantStruct(:D₂, 1)] \n", - " [ReactantStruct(:P₁, 1), ReactantStruct(:P₂, 1)]\n", - " [ReactantStruct(:T, 1)] " - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "substratesymstoich.(rn, 1:numreactions(rn))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To see the net stoichiometries for all reactions we would use" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{Pair{Int64,Int64},1},1}:\n", - " [1=>1] \n", - " [2=>1] \n", - " [1=>-1] \n", - " [1=>1] \n", - " [2=>-1] \n", - " [2=>1] \n", - " [3=>1] \n", - " [4=>1] \n", - " [3=>-1] \n", - " [4=>-1] \n", - " [3=>-2, 5=>1] \n", - " [3=>2, 5=>-1] \n", - " [4=>-2, 6=>1] \n", - " [4=>2, 6=>-1] \n", - " [3=>-1, 4=>-1, 7=>1]\n", - " [3=>1, 4=>1, 7=>-1] " - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "netstoich.(rn, 1:numreactions(rn))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here the first integer in each pair corresponds to the index of the species\n", - "(with symbol `species(rn)[index]`). The second integer corresponds to the net\n", - "stoichiometric coefficient of the species within the reaction. `substratestoich`\n", - "and `productstoich` are defined similarly. 
\n", - "\n", - "Several functions are also provided that calculate different types of\n", - "[dependency\n", - "graphs](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Dependency-Graphs-1).\n", - "These include `rxtospecies_depgraph`, which provides a mapping from reaction\n", - "index to the indices of species whose population changes when the reaction\n", - "occurs:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{Int64,1},1}:\n", - " [1] \n", - " [2] \n", - " [1] \n", - " [1] \n", - " [2] \n", - " [2] \n", - " [3] \n", - " [4] \n", - " [3] \n", - " [4] \n", - " [3, 5] \n", - " [3, 5] \n", - " [4, 6] \n", - " [4, 6] \n", - " [3, 4, 7]\n", - " [3, 4, 7]" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rxtospecies_depgraph(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here the last row indicates that the species with indices `[3,4,7]` will change\n", - "values when the reaction `T --> P₁ + P₂` occurs. To confirm these are the\n", - "correct species we can look at" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "3-element Array{Symbol,1}:\n", - " :P₁\n", - " :P₂\n", - " :T " - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "species(rn)[[3,4,7]]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `speciestorx_depgraph` similarly provides a mapping from species to reactions \n", - "for which their *rate laws* depend on that species. These correspond to all reactions\n", - "for which the given species is in the `dependent` set of the reaction. 
We can verify this\n", - "for the first species, `m₁`:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "2-element Array{Int64,1}:\n", - " 3\n", - " 7" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "speciestorx_depgraph(rn)[1]" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "2-element Array{Int64,1}:\n", - " 3\n", - " 7" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, `rxtorx_depgraph` provides a mapping that shows when a given reaction\n", - "occurs, which other reactions have rate laws that involve species whose value\n", - "would have changed:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{Int64,1},1}:\n", - " [1, 3, 7] \n", - " [2, 5, 8] \n", - " [3, 7] \n", - " [3, 4, 7] \n", - " [5, 8] \n", - " [5, 6, 8] \n", - " [7, 9, 11, 15] \n", - " [8, 10, 13, 15] \n", - " [9, 11, 15] \n", - " [10, 13, 15] \n", - " [2, 9, 11, 12, 15] \n", - " [2, 9, 11, 12, 15] \n", - " [1, 10, 13, 14, 15] \n", - " [1, 10, 13, 14, 15] \n", - " [9, 10, 11, 13, 15, 16]\n", - " [9, 10, 11, 13, 15, 16]" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rxtorx_depgraph(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Note on Using Network Property API Functions\n", - "Many basic network query and reaction property functions are simply accessors,\n", - "returning information that is already stored within the generated\n", - "`reaction_network`. 
For these functions, modifying the returned data structures\n", - "may lead to inconsistent internal state within the network. As such, they should\n", - "be used for accessing, but not modifying, network properties. The [API\n", - "documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html)\n", - "indicates which functions return newly allocated data structures and which\n", - "return data stored within the `reaction_network`.\n", - "\n", - "---\n", - "## Incremental Construction of Networks\n", - "The `@reaction_network` macro is monolithic, in that it not only constructs and\n", - "stores basic network properties such as the reaction stoichiometries, but also\n", - "generates **everything** needed to immediately solve ODE, SDE and jump models\n", - "using the network. This includes Jacobian functions, noise functions, and jump\n", - "functions for each reaction. While this allows for a compact interface to the\n", - "DifferentialEquations.jl solvers, it can also be computationally expensive for\n", - "large networks, where a user may only wish to solve one type of problem and/or\n", - "have fine-grained control over what is generated. In addition, some types of\n", - "reaction network structures are more amenable to being constructed\n", - "programmatically, as opposed to writing out all reactions by hand within one\n", - "macro. For these reasons DiffEqBiological provides two additional macros that\n", - "only *initially* setup basic reaction network properties, and which can be\n", - "extended through a programmatic interface: `@min_reaction_network` and\n", - "`@empty_reaction_network`. We now give an introduction to constructing these\n", - "more minimal network representations, and how they can be programmatically\n", - "extended. 
See also the relevant [API\n", - "section](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1).\n", - "\n", - "The `@min_reaction_network` macro works identically to the `@reaction_network`\n", - "macro, but the generated network will only be complete with respect to its\n", - "representation of chemical network properties (i.e. species, parameters and\n", - "reactions). No ODE, SDE or jump models are generated during the macro call. It\n", - "can subsequently be extended with the addition of new species, parameters or\n", - "reactions. The `@empty_reaction_network` allocates an empty network structure\n", - "that can also be extended using the programmatic interface. For example, consider\n", - "a partial version of the toggle-switch like network we defined above:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "rnmin = @min_reaction_network begin\n", - " (δ,γ), m₁ ↔ ∅\n", - " (δ,γ), m₂ ↔ ∅\n", - " β, m₁ --> m₁ + P₁\n", - " β, m₂ --> m₂ + P₂\n", - " μ, P₁ --> ∅\n", - " μ, P₂ --> ∅\n", - "end δ γ β μ;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we have left out the first two, and last three, reactions from the original\n", - "`reaction_network`. To expand the network until it is functionally equivalent to\n", - "the original model we add back in the missing species, parameters, and *finally*\n", - "the missing reactions. Note, it is required that species and parameters be\n", - "defined before any reactions using them are added. The necessary network\n", - "extension functions are given by `addspecies!`, `addparam!` and `addreaction!`,\n", - "and described in the\n", - "[API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). 
To complete `rnmin` we first add the relevant\n", - "species:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "addspecies!(rnmin, :D₁)\n", - "addspecies!(rnmin, :D₂)\n", - "addspecies!(rnmin, :T)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next we add the needed parameters" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "addparam!(rnmin, :α)\n", - "addparam!(rnmin, :K)\n", - "addparam!(rnmin, :n)\n", - "addparam!(rnmin, :k₊)\n", - "addparam!(rnmin, :k₋)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note, both `addspecies!` and `addparam!` also accept strings encoding the\n", - "variable names (which are then converted to `Symbol`s internally).\n", - "\n", - "We are now ready to add the missing reactions. The API provides two forms of the\n", - "`addreaction!` function, one takes expressions analogous to what one would write\n", - "in the macro:" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂))\n", - "addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂))\n", - "addreaction!(rnmin, :k₊, :(2P₁ --> D₁))\n", - "addreaction!(rnmin, :k₋, :(D₁ --> 2P₁))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The rate can be an expression or symbol as above, but can also just be a\n", - "numeric value. 
The second form of `addreaction!` takes tuples of\n", - "`Pair{Symbol,Int}` that encode the stoichiometric coefficients of substrates and\n", - "reactants:" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich)\n", - "addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,))\n", - "addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,))\n", - "addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check that `rn` and `rnmin` have the same set of species:" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "setdiff(species(rn), species(rnmin))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "the same set of params:" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "setdiff(params(rn), params(rnmin))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "and the final reaction has the same substrates, reactions, and rate expression:" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rxidx = numreactions(rn)\n", - "setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx))" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - 
{ - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "setdiff(products(rn, rxidx), products(rnmin, rxidx))" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "true" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps\n", - "Once a network generated from `@min_reaction_network` or\n", - "`@empty_reaction_network` has had all the associated species, parameters and\n", - "reactions filled in, corresponding ODE, SDE or jump models can be constructed.\n", - "The relevant API functions are `addodes!`, `addsdes!` and `addjumps!`. One\n", - "benefit to contructing models with these functions is that they offer more\n", - "fine-grained control over what actually gets constructed. For example,\n", - "`addodes!` has the optional keyword argument, `build_jac`, which if set to\n", - "`false` will disable construction of symbolic Jacobians and functions for\n", - "evaluating Jacobians. For large networks this can give a significant speed-up in\n", - "the time required for constructing an ODE model. 
Each function and its\n", - "associated keyword arguments are described in the API section, [Functions to add\n", - "ODEs, SDEs or Jumps to a\n", - "Network](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1).\n", - "\n", - "Let's extend `rnmin` to include the needed functions for use in ODE\n", - "solvers:" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "addodes!(rnmin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The [Generated Functions for\n", - "Models](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Functions-for-Models-1)\n", - "section of the API shows what functions have been generated. For ODEs these\n", - "include `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)`\n", - "which evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. For\n", - "each generated function, the corresponding expressions from which it was\n", - "generated can be retrieved using accessors from the [Generated\n", - "Expressions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Expressions-1)\n", - "section of the API. The equations within `du` can be retrieved using the\n", - "`odeexprs(rnmin)` function. 
For example:" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "7-element Array{Union{Float64, Int64, Expr, Symbol},1}:\n", - " :((-(δ * m₁) + γ) + (α * K ^ n) / (K ^ n + D₂ ^ n)) \n", - " :((-(δ * m₂) + γ) + (α * K ^ n) / (K ^ n + D₁ ^ n)) \n", - " :(((((β * m₁ - μ * P₁) + -2 * k₊ * (P₁ ^ 2 / 2)) + 2 * k₋ * D₁) - k₊ * P₁ * P₂) + k₋ * T)\n", - " :(((((β * m₂ - μ * P₂) + -2 * k₊ * (P₂ ^ 2 / 2)) + 2 * k₋ * D₂) - k₊ * P₁ * P₂) + k₋ * T)\n", - " :(k₊ * (P₁ ^ 2 / 2) - k₋ * D₁) \n", - " :(k₊ * (P₂ ^ 2 / 2) - k₋ * D₂) \n", - " :(k₊ * P₁ * P₂ - k₋ * T) " - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "odeexprs(rnmin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Using Latexify we can see the ODEs themselves to compare with these expressions:" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{align}\n", - "\\frac{dm_1}{dt} =& - \\delta \\cdot m_1 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}} \\\\\n", - "\\frac{dm_2}{dt} =& - \\delta \\cdot m_2 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}} \\\\\n", - "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 -2 \\cdot k_+ \\cdot \\frac{P_1^{2}}{2} + 2 \\cdot k_- \\cdot D_1 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 -2 \\cdot k_+ \\cdot \\frac{P_2^{2}}{2} + 2 \\cdot k_- \\cdot D_2 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dD_1}{dt} =& k_+ \\cdot \\frac{P_1^{2}}{2} - k_- \\cdot D_1 \\\\\n", - "\\frac{dD_2}{dt} =& k_+ \\cdot \\frac{P_2^{2}}{2} - k_- \\cdot D_2 \\\\\n", - "\\frac{dT}{dt} =& k_+ \\cdot P_1 \\cdot P_2 - k_- \\cdot T \\\\\n", - "\\end{align}\n" - ], - "text/plain": [ - "L\"\\begin{align}\n", - "\\frac{dm_1}{dt} =& - \\delta \\cdot m_1 + \\gamma + 
\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}} \\\\\n", - "\\frac{dm_2}{dt} =& - \\delta \\cdot m_2 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}} \\\\\n", - "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 -2 \\cdot k_+ \\cdot \\frac{P_1^{2}}{2} + 2 \\cdot k_- \\cdot D_1 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 -2 \\cdot k_+ \\cdot \\frac{P_2^{2}}{2} + 2 \\cdot k_- \\cdot D_2 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dD_1}{dt} =& k_+ \\cdot \\frac{P_1^{2}}{2} - k_- \\cdot D_1 \\\\\n", - "\\frac{dD_2}{dt} =& k_+ \\cdot \\frac{P_2^{2}}{2} - k_- \\cdot D_2 \\\\\n", - "\\frac{dT}{dt} =& k_+ \\cdot P_1 \\cdot P_2 - k_- \\cdot T \\\\\n", - "\\end{align}\n", - "\"" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(rnmin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For ODEs two other functions are generated by `addodes!`. 
`jacfun(rnmin)` will\n", - "return the generated Jacobian evaluation function, `fjac(dJ,u,p,t)`, which given\n", - "the current solution `u` evaluates the Jacobian within `dJ`.\n", - "`jacobianexprs(rnmin)` gives the corresponding matrix of expressions, which can\n", - "be used with Latexify to see the Jacobian:" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{equation}\n", - "\\left[\n", - "\\begin{array}{ccccccc}\n", - " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_2^{-1 + n}}{\\left( K^{n} + D_2^{n} \\right)^{2}} & 0 \\\\\n", - "0 & - \\delta & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_1^{-1 + n}}{\\left( K^{n} + D_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", - "\\beta & 0 & - \\mu - 2 \\cdot k_+ \\cdot P_1 - k_+ \\cdot P_2 & - k_+ \\cdot P_1 & 2 \\cdot k_- & 0 & k_{-} \\\\\n", - "0 & \\beta & - k_+ \\cdot P_2 & - \\mu - k_+ \\cdot P_1 - 2 \\cdot k_+ \\cdot P_2 & 0 & 2 \\cdot k_- & k_{-} \\\\\n", - "0 & 0 & k_+ \\cdot P_1 & 0 & - k_- & 0 & 0 \\\\\n", - "0 & 0 & 0 & k_+ \\cdot P_2 & 0 & - k_- & 0 \\\\\n", - "0 & 0 & k_+ \\cdot P_2 & k_+ \\cdot P_1 & 0 & 0 & - k_- \\\\\n", - "\\end{array}\n", - "\\right]\n", - "\\end{equation}\n" - ], - "text/plain": [ - "L\"\\begin{equation}\n", - "\\left[\n", - "\\begin{array}{ccccccc}\n", - " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_2^{-1 + n}}{\\left( K^{n} + D_2^{n} \\right)^{2}} & 0 \\\\\n", - "0 & - \\delta & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_1^{-1 + n}}{\\left( K^{n} + D_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", - "\\beta & 0 & - \\mu - 2 \\cdot k_+ \\cdot P_1 - k_+ \\cdot P_2 & - k_+ \\cdot P_1 & 2 \\cdot k_- & 0 & k_{-} \\\\\n", - "0 & \\beta & - k_+ \\cdot P_2 & - \\mu - k_+ \\cdot P_1 - 2 \\cdot k_+ \\cdot P_2 & 0 & 2 \\cdot k_- & k_{-} \\\\\n", - "0 & 0 & k_+ \\cdot P_1 & 0 & - k_- & 0 & 0 \\\\\n", - "0 & 0 & 0 & k_+ \\cdot P_2 & 0 & - 
k_- & 0 \\\\\n", - "0 & 0 & k_+ \\cdot P_2 & k_+ \\cdot P_1 & 0 & 0 & - k_- \\\\\n", - "\\end{array}\n", - "\\right]\n", - "\\end{equation}\n", - "\"" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(jacobianexprs(rnmin))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`addodes!` also generates a function that evaluates the Jacobian of the ODE\n", - "derivative functions with respect to the parameters. `paramjacfun(rnmin)` then\n", - "returns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which\n", - "given the current solution `u` evaluates the Jacobian matrix with respect to\n", - "parameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an\n", - "[`ODEFunction`](http://docs.juliadiffeq.org/latest/features/performance_overloads.html)\n", - "representation of the ODEs is available from `odefun(rnmin)`. \n", - "\n", - "`addsdes!` and `addjumps!` work similarly to complete the network for use in\n", - "StochasticDiffEq and DiffEqJump solvers. \n", - "\n", - "#### Note on Using Generated Function and Expression API Functions\n", - "The generated functions and expressions accessible through the API require first\n", - "calling the appropriate `addodes!`, `addsdes` or `addjumps` function. These are\n", - "responsible for actually constructing the underlying functions and expressions.\n", - "The API accessors simply return already constructed functions and expressions\n", - "that are stored within the `reaction_network` structure.\n", - "\n", - "---\n", - "## Example of Generating a Network Programmatically\n", - "For a user directly typing in a reaction network, it is generally easier to use\n", - "the `@min_reaction_network` or `@reaction_network` macros to fully specify\n", - "reactions. However, for large, structured networks it can be much easier to\n", - "generate the network programmatically. 
For very large networks, with tens of\n", - "thousands of reactions, the form of `addreaction!` that uses stoichiometric\n", - "coefficients should be preferred as it offers substantially better performance.\n", - "To put together everything we've seen, let's generate the network corresponding\n", - "to a 1D continuous time random walk, approximating the diffusion of molecules\n", - "within an interval.\n", - "\n", - "The basic \"reaction\" network we wish to study is \n", - "\n", - "$$\n", - "u_1 \\leftrightarrows u_2 \\leftrightarrows u_3 \\cdots \\leftrightarrows u_{N}\n", - "$$\n", - "\n", - "for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll\n", - "assume the rate molecules hop from their current site to any particular neighbor\n", - "is just $h^{-2}$. We can interpret this hopping process as a collection of\n", - "$2N-2$ \"reactions\", with the form $u_i \\to u_j$ for $j=i+1$ or $j=i-1$. We construct\n", - "the corresponding reaction network as follows. First we set values for the basic\n", - "parameters:" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.015625" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "N = 64\n", - "h = 1 / N" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "then we create an empty network, and add each species" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "rn = @empty_reaction_network\n", - "\n", - "for i = 1:N\n", - " addspecies!(rn, Symbol(:u, i))\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We next add one parameter `β`, which we will set equal to the hopping rate \n", - "of molecules, $h^{-2}$:" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [], - "source": [ - "addparam!(rn, :β)" - ] 
- }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, we add in the $2N-2$ possible hopping reactions:" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "for i = 1:N\n", - " (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,))\n", - " (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,))\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's first construct an ODE model for the network" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "addodes!(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now need to specify the initial condition, parameter vector and time interval\n", - "to solve on. We start with 10000 molecules placed at the center of the domain,\n", - "and setup an `ODEProblem` to solve:" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[36mODEProblem\u001b[0m with uType \u001b[36mArray{Float64,1}\u001b[0m and tType \u001b[36mFloat64\u001b[0m. In-place: \u001b[36mtrue\u001b[0m\n", - "timespan: (0.0, 0.01)\n", - "u0: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 … 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]" - ] - }, - "execution_count": 32, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "u₀ = zeros(N)\n", - "u₀[div(N,2)] = 10000\n", - "p = [1/(h*h)]\n", - "tspan = (0.,.01)\n", - "oprob = ODEProblem(rn, u₀, tspan, p)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are now ready to solve the problem and plot the solution. 
Since we have\n", - "essentially generated a method of lines discretization of the diffusion equation\n", - "with a discontinuous initial condition, we'll use an A-L stable implicit ODE\n", - "solver, `KenCarp4`, and plot the solution at a few times:" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", 
- " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sol = solve(oprob, KenCarp4())\n", - "times = [0., .0001, .001, .01]\n", - "plt = plot()\n", - "for time in times\n", - " plot!(plt, 1:N, sol(time), fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", label=string(\"t = \", time), lw=3)\n", - "end\n", - "plot(plt, ylims=(0.,10000.))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we see the characteristic diffusion of molecules from the center of the\n", - "domain, resulting in a shortening and widening of the solution as $t$ increases.\n", - "\n", - "Let's now look at a stochastic chemical kinetics jump process version of the\n", - "model, where β gives the probability per time each molecule can hop from its\n", - "current lattice site to an individual neighboring site. We first add in the\n", - "jumps, disabling `regular_jumps` since they are not needed, and using the\n", - "`minimal_jumps` flag to construct a minimal representation of the needed jumps.\n", - "We then construct a `JumpProblem`, and use the Composition-Rejection Direct\n", - "method, `DirectCR`, to simulate the process of the molecules hopping about on\n", - "the lattice:" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "retcode: Default\n", - "Interpolation: Piecewise constant interpolation\n", - "t: 4-element Array{Float64,1}:\n", - " 0.0 \n", - " 0.0001\n", - " 0.001 \n", - " 0.01 \n", - "u: 4-element Array{Array{Int64,1},1}:\n", - " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", - " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", - " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", - " [3, 2, 2, 6, 8, 5, 12, 9, 13, 21 … 14, 16, 10, 11, 3, 3, 0, 2, 0, 3]" - ] - }, - "execution_count": 34, - "metadata": {}, - "output_type": "execute_result" - } - ], 
- "source": [ - "addjumps!(rn, build_regular_jumps=false, minimal_jumps=true)\n", - "\n", - "# make the initial condition integer valued \n", - "u₀ = zeros(Int, N)\n", - "u₀[div(N,2)] = 10000\n", - "\n", - "# setup and solve the problem\n", - "dprob = DiscreteProblem(rn, u₀, tspan, p)\n", - "jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))\n", - "jsol = solve(jprob, SSAStepper(), saveat=times)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now plot bar graphs showing the locations of the molecules at the same\n", - "set of times we examined the ODE solution. For comparison, we also plot the\n", - "corresponding ODE solutions (red lines) that we found:" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", 
- " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 35, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "times = [0., .0001, .001, .01]\n", - "plts = []\n", - "for i = 1:4\n", - " b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", title=string(\"t = \", 
times[i]))\n", - " plot!(b,sol(times[i]))\n", - " push!(plts,b)\n", - "end\n", - "plot(plts...)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similar to the ODE solutions, we see that the molecules spread out and become\n", - "more and more well-mixed throughout the domain as $t$ increases. The simulation\n", - "results are noisy due to the finite numbers of molecules present in the\n", - "stochsatic simulation, but since the number of molecules is large they agree\n", - "well with the ODE solution at each time.\n", - "\n", - "---\n", - "## Getting Help\n", - "Have a question related to DiffEqBiological or this tutorial? Feel free to ask\n", - "in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\n", - "If you think you've found a bug in DiffEqBiological, or would like to\n", - "request/discuss new functionality, feel free to open an issue on\n", - "[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\n", - "there is no related issue already open). If you've found a bug in this tutorial,\n", - "or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\n", - "site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). 
Or, submit a pull\n", - "request to DiffEqTutorials updating the tutorial!\n", - "\n", - "---" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Julia 1.1.0", - "language": "julia", - "name": "julia-1.1" - }, - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebook/models/diffeqbio_I_introduction.ipynb b/notebook/models/diffeqbio_I_introduction.ipynb deleted file mode 100644 index 6a98aa4f..00000000 --- a/notebook/models/diffeqbio_I_introduction.ipynb +++ /dev/null @@ -1,22913 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# DiffEqBiological Tutorial I: Introduction\n", - "### Samuel Isaacson\n", - "\n", - "DiffEqBiological.jl is a domain specific language (DSL) for writing chemical\n", - "reaction networks in Julia. The generated chemical reaction network model can\n", - "then be translated into a variety of mathematical models which can be solved\n", - "using components of the broader\n", - "[DifferentialEquations.jl](http://juliadiffeq.org/) ecosystem.\n", - "\n", - "In this tutorial we'll provide an introduction to using DiffEqBiological to\n", - "specify chemical reaction networks, and then to solve ODE, jump, tau-leaping and\n", - "SDE models generated from them. Let's start by using the DiffEqBiological\n", - "`reaction_network` macro to specify a simply chemical reaction network; the\n", - "well-known Repressilator. \n", - "\n", - "We first import the basic packages we'll need, and use Plots.jl for making\n", - "figures:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# If not already installed, first hit \"]\" within a Julia REPL. 
Then type:\n", - "# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify \n", - "\n", - "using DifferentialEquations, DiffEqBiological, Plots, Latexify\n", - "pyplot(fmt=:svg);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now construct the reaction network. The basic types of arrows and predefined\n", - "rate laws one can use are discussed in detail within the DiffEqBiological\n", - "[Chemical Reaction Models\n", - "documentation](http://docs.juliadiffeq.org/latest/models/biological.html). Here\n", - "we use a mix of first order, zero order and repressive Hill function rate laws.\n", - "Note, $\\varnothing$ corresponds to the empty state, and is used for zeroth order\n", - "production and first order degradation reactions:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "repressilator = @reaction_network begin\n", - " hillr(P₃,α,K,n), ∅ --> m₁\n", - " hillr(P₁,α,K,n), ∅ --> m₂\n", - " hillr(P₂,α,K,n), ∅ --> m₃\n", - " (δ,γ), m₁ ↔ ∅\n", - " (δ,γ), m₂ ↔ ∅\n", - " (δ,γ), m₃ ↔ ∅\n", - " β, m₁ --> m₁ + P₁\n", - " β, m₂ --> m₂ + P₂\n", - " β, m₃ --> m₃ + P₃\n", - " μ, P₁ --> ∅\n", - " μ, P₂ --> ∅\n", - " μ, P₃ --> ∅\n", - "end α K n δ γ β μ;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can use Latexify to look at the corresponding reactions and understand the\n", - "generated rate laws for each reaction" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{align}\n", - "\\require{mhchem}\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + P_3^{n}}] m_{1}}\\\\\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + P_1^{n}}] m_{2}}\\\\\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + P_2^{n}}] m_{3}}\\\\\n", - "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{2} &<=>[\\delta][\\gamma] 
\\varnothing}\\\\\n", - "\\ce{ m_{3} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", - "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", - "\\ce{ m_{3} &->[\\beta] m_{3} + P_{3}}\\\\\n", - "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{3} &->[\\mu] \\varnothing}\n", - "\\end{align}\n" - ], - "text/plain": [ - "L\"\\begin{align}\n", - "\\require{mhchem}\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + P_3^{n}}] m_{1}}\\\\\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + P_1^{n}}] m_{2}}\\\\\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + P_2^{n}}] m_{3}}\\\\\n", - "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{2} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{3} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", - "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", - "\\ce{ m_{3} &->[\\beta] m_{3} + P_{3}}\\\\\n", - "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{3} &->[\\mu] \\varnothing}\n", - "\\end{align}\n", - "\"" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(repressilator; env=:chemical)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also use Latexify to look at the corresponding ODE model for the chemical\n", - "system" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{align}\n", - "\\frac{dm_1}{dt} =& \\frac{\\alpha \\cdot K^{n}}{K^{n} + P_3^{n}} - \\delta \\cdot m_1 + \\gamma \\\\\n", - "\\frac{dm_2}{dt} =& \\frac{\\alpha \\cdot K^{n}}{K^{n} + P_1^{n}} - \\delta \\cdot m_2 + \\gamma \\\\\n", - "\\frac{dm_3}{dt} =& \\frac{\\alpha \\cdot K^{n}}{K^{n} + P_2^{n}} - \\delta \\cdot m_3 + 
\\gamma \\\\\n", - "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 \\\\\n", - "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 \\\\\n", - "\\frac{dP_3}{dt} =& \\beta \\cdot m_3 - \\mu \\cdot P_3 \\\\\n", - "\\end{align}\n" - ], - "text/plain": [ - "L\"\\begin{align}\n", - "\\frac{dm_1}{dt} =& \\frac{\\alpha \\cdot K^{n}}{K^{n} + P_3^{n}} - \\delta \\cdot m_1 + \\gamma \\\\\n", - "\\frac{dm_2}{dt} =& \\frac{\\alpha \\cdot K^{n}}{K^{n} + P_1^{n}} - \\delta \\cdot m_2 + \\gamma \\\\\n", - "\\frac{dm_3}{dt} =& \\frac{\\alpha \\cdot K^{n}}{K^{n} + P_2^{n}} - \\delta \\cdot m_3 + \\gamma \\\\\n", - "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 \\\\\n", - "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 \\\\\n", - "\\frac{dP_3}{dt} =& \\beta \\cdot m_3 - \\mu \\cdot P_3 \\\\\n", - "\\end{align}\n", - "\"" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(repressilator)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To solve the ODEs we need to specify the values of the parameters in the model,\n", - "the initial condition, and the time interval to solve the model on. To do this\n", - "it helps to know the orderings of the parameters and the species. Parameters are\n", - "ordered in the same order they appear after the `end` statement in the\n", - "`@reaction_network` macro. Species are ordered in the order they first appear\n", - "within the `@reaction_network` macro. 
We can see these orderings using the\n", - "`speciesmap` and `paramsmap` functions:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "OrderedCollections.OrderedDict{Symbol,Int64} with 6 entries:\n", - " :m₁ => 1\n", - " :m₂ => 2\n", - " :m₃ => 3\n", - " :P₁ => 4\n", - " :P₂ => 5\n", - " :P₃ => 6" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "speciesmap(repressilator)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "OrderedCollections.OrderedDict{Symbol,Int64} with 7 entries:\n", - " :α => 1\n", - " :K => 2\n", - " :n => 3\n", - " :δ => 4\n", - " :γ => 5\n", - " :β => 6\n", - " :μ => 7" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "paramsmap(repressilator)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Solving the ODEs:\n", - "Knowing these orderings, we can create parameter and initial condition vectors,\n", - "and setup the `ODEProblem` we want to solve:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[36mODEProblem\u001b[0m with uType \u001b[36mArray{Float64,1}\u001b[0m and tType \u001b[36mFloat64\u001b[0m. 
In-place: \u001b[36mtrue\u001b[0m\n", - "timespan: (0.0, 10000.0)\n", - "u0: [0.0, 0.0, 0.0, 20.0, 0.0, 0.0]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# parameters [α,K,n,δ,γ,β,μ]\n", - "p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60)\n", - "\n", - "# initial condition [m₁,m₂,m₃,P₁,P₂,P₃]\n", - "u₀ = [0.,0.,0.,20.,0.,0.]\n", - "\n", - "# time interval to solve on\n", - "tspan = (0., 10000.)\n", - "\n", - "# create the ODEProblem we want to solve\n", - "oprob = ODEProblem(repressilator, u₀, tspan, p)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "At this point we are all set to solve the ODEs. We can now use any ODE solver\n", - "from within the DiffEq package. We'll just use the default DifferentialEquations\n", - "solver for now, and then plot the solutions:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sol = solve(oprob, saveat=10.)\n", - "plot(sol, fmt=:svg)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We see the well-known oscillatory behavior of the repressilator! For more on\n", - "choices of ODE solvers, see the JuliaDiffEq\n", - "[documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html).\n", - "\n", - "---\n", - "\n", - "## Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kinetics\n", - "Let's now look at a stochastic chemical kinetics model of the repressilator,\n", - "modeling it with jump processes. 
Here we will construct a DiffEqJump\n", - "`JumpProblem` that uses Gillespie's `Direct` method, and then solve it to\n", - "generate one realization of the jump process:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# first we redefine the initial condition to be integer valued\n", - "u₀ = [0,0,0,20,0,0]\n", - "\n", - "# next we create a discrete problem to encode that our species are integer valued:\n", - "dprob = DiscreteProblem(repressilator, u₀, tspan, p)\n", - "\n", - 
"# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver:\n", - "jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false))\n", - "\n", - "# now let's solve and plot the jump process:\n", - "sol = solve(jprob, SSAStepper(), saveat=10.)\n", - "plot(sol, fmt=:svg)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we see that oscillations remain, but become much noiser. Note, in\n", - "constructing the `JumpProblem` we could have used any of the SSAs that are part\n", - "of DiffEqJump instead of the `Direct` method, see the list of SSAs (i.e.\n", - "constant rate jump aggregators) in the\n", - "[documentation](http://docs.juliadiffeq.org/latest/types/jump_types.html#Constant-Rate-Jump-Aggregators-1).\n", - "\n", - "---\n", - "## $\\tau$-leaping Methods:\n", - "While SSAs generate exact realizations for stochastic chemical kinetics jump\n", - "process models, [$\\tau$-leaping](https://en.wikipedia.org/wiki/Tau-leaping)\n", - "methods offer a performant alternative by discretizing in time the underlying\n", - "time-change representation of the stochastic process. The DiffEqJump package has\n", - "limited support for $\\tau$-leaping methods in the form of the basic Euler's\n", - "method type approximation proposed by Gillespie. 
We can simulate a $\\tau$-leap\n", - "approximation to the repressilator by using the `RegularJump` representation of\n", - "the network to construct a `JumpProblem`:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rjs = regularjumps(repressilator)\n", - "lprob = JumpProblem(dprob, Direct(), rjs)\n", - "lsol = solve(lprob, SimpleTauLeaping(), dt=.1)\n", - "plot(lsol, plotdensity=1000, fmt=:svg)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models:\n", - "At an intermediary physical scale between macroscopic ODE models and microscopic\n", - "stochastic chemical kinetic models lies the CLE, a SDE version of the model. The\n", - "SDEs add to each ODE above a noise term. As the repressilator has species that\n", - "get very close to zero in size, it is not a good candidate to model with the CLE\n", - "(where solutions can then go negative and become unphysical). Let's create a\n", - "simpler reaction network for a birth-death process that will stay non-negative:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "bdp = @reaction_network begin\n", - " c₁, X --> 2X\n", - " c₂, X --> 0\n", - " c₃, 0 --> X\n", - "end c₁ c₂ c₃\n", - "p = (1.0,2.0,50.)\n", - "u₀ = [5.]\n", - "tspan = (0.,4.);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The corresponding Chemical Langevin Equation SDE is then\n", - "\n", - "$$\n", - "dX_t = \\left(c_1 X - c_2 X + c_3 \\right) dt + \\left( \\sqrt{c_1 X} - \\sqrt{c_2 X} + \\sqrt{c_3} \\right)dW_t,\n", - "$$\n", - "\n", - "where $W_t$ denotes a standard Brownian Motion. 
We can solve the CLE SDE model\n", - "by creating an SDEProblem and solving it similar to what we did for ODEs above:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# SDEProblem for CLE\n", - "sprob = SDEProblem(bdp, u₀, tspan, p)\n", - "\n", - "# solve and plot, tstops is used to specify enough points \n", - "# that the plot looks well-resolved\n", - "sol = solve(sprob, tstops=range(0., step=4e-3, length=1001))\n", - "plot(sol, fmt=:svg)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We again have complete freedom to select any of the\n", - "StochasticDifferentialEquations.jl SDE solvers, see the\n", - "[documentation](http://docs.juliadiffeq.org/latest/solvers/sde_solve.html).\n", - "\n", - "---\n", - "## What information can be queried from the reaction_network:\n", - "The generated `reaction_network` contains a lot of basic information. For example\n", - "- `f=oderhsfun(repressilator)` is a function `f(du,u,p,t)` that given the current\n", - " state vector `u` and time `t` fills `du` with the time derivatives of `u`\n", - " (i.e. the right hand side of the ODEs).\n", - "- `jac=jacfun(repressilator)` is a function `jac(J,u,p,t)` that evaluates and\n", - " returns the Jacobian of the ODEs in `J`. 
A corresponding Jacobian matrix of\n", - " expressions can be accessed using the `jacobianexprs` function:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{equation}\n", - "\\left[\n", - "\\begin{array}{cccccc}\n", - " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot P_3^{-1 + n}}{\\left( K^{n} + P_3^{n} \\right)^{2}} \\\\\n", - "0 & - \\delta & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot P_1^{-1 + n}}{\\left( K^{n} + P_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", - "0 & 0 & - \\delta & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot P_2^{-1 + n}}{\\left( K^{n} + P_2^{n} \\right)^{2}} & 0 \\\\\n", - "\\beta & 0 & 0 & - \\mu & 0 & 0 \\\\\n", - "0 & \\beta & 0 & 0 & - \\mu & 0 \\\\\n", - "0 & 0 & \\beta & 0 & 0 & - \\mu \\\\\n", - "\\end{array}\n", - "\\right]\n", - "\\end{equation}\n" - ], - "text/plain": [ - "L\"\\begin{equation}\n", - "\\left[\n", - "\\begin{array}{cccccc}\n", - " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot P_3^{-1 + n}}{\\left( K^{n} + P_3^{n} \\right)^{2}} \\\\\n", - "0 & - \\delta & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot P_1^{-1 + n}}{\\left( K^{n} + P_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", - "0 & 0 & - \\delta & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot P_2^{-1 + n}}{\\left( K^{n} + P_2^{n} \\right)^{2}} & 0 \\\\\n", - "\\beta & 0 & 0 & - \\mu & 0 & 0 \\\\\n", - "0 & \\beta & 0 & 0 & - \\mu & 0 \\\\\n", - "0 & 0 & \\beta & 0 & 0 & - \\mu \\\\\n", - "\\end{array}\n", - "\\right]\n", - "\\end{equation}\n", - "\"" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(jacobianexprs(repressilator))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "- `pjac = paramjacfun(repressilator)` is a function `pjac(pJ,u,p,t)` that\n", - " evaluates and returns the Jacobian, `pJ`, of the ODEs *with 
respect to the\n", - " parameters*. This allows `reaction_network`s to be used in the\n", - " DifferentialEquations.jl local sensitivity analysis package\n", - " [DiffEqSensitivity](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html).\n", - "\n", - "\n", - "By default, generated `ODEProblems` will be passed the corresponding Jacobian\n", - "function, which will then be used within implicit ODE/SDE methods. \n", - "\n", - "The [DiffEqBiological API\n", - "documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides\n", - "a thorough description of the many query functions that are provided to access\n", - "network properties and generated functions. In DiffEqBiological Tutorial II\n", - "we'll explore the API.\n", - "\n", - "---\n", - "## Getting Help\n", - "Have a question related to DiffEqBiological or this tutorial? Feel free to ask\n", - "in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\n", - "If you think you've found a bug in DiffEqBiological, or would like to\n", - "request/discuss new functionality, feel free to open an issue on\n", - "[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\n", - "there is no related issue already open). If you've found a bug in this tutorial,\n", - "or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\n", - "site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). 
Or, submit a pull\n", - "request to DiffEqTutorials updating the tutorial!\n", - "\n", - "---" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Julia 1.1.0", - "language": "julia", - "name": "julia-1.1" - }, - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebook/models/kepler_problem.ipynb b/notebook/models/kepler_problem.ipynb deleted file mode 100644 index af3f1977..00000000 --- a/notebook/models/kepler_problem.ipynb +++ /dev/null @@ -1,179 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "The Hamiltonian $\\mathcal {H}$ and the angular momentum $L$ for the Kepler problem are\n\n$$\\mathcal {H} = \\frac{1}{2}(\\dot{q}^2_1+\\dot{q}^2_2)-\\frac{1}{\\sqrt{q^2_1+q^2_2}},\\quad\nL = q_1\\dot{q_2} - \\dot{q_1}q_2$$\n\nAlso, we know that\n\n$${\\displaystyle {\\frac {\\mathrm {d} {\\boldsymbol {p}}}{\\mathrm {d} t}}=-{\\frac {\\partial {\\mathcal {H}}}{\\partial {\\boldsymbol {q}}}}\\quad ,\\quad {\\frac {\\mathrm {d} {\\boldsymbol {q}}}{\\mathrm {d} t}}=+{\\frac {\\partial {\\mathcal {H}}}{\\partial {\\boldsymbol {p}}}}}$$\n# Kepler Problem\n### Yingbo Ma, Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr()\nH(q,p) = norm(p)^2/2 - inv(norm(q))\nL(q,p) = q[1]*p[2] - p[1]*q[2]\n\npdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q)\nqdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p)\n\ninitial_position = [.4, 0]\ninitial_velocity = [0., 2.]\ninitial_cond = (initial_position, initial_velocity)\ninitial_first_integrals = (H(initial_cond...), L(initial_cond...))\ntspan = (0,20.)\nprob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan)\nsol = solve(prob, KahanLi6(), dt=1//10);" - ], - "metadata": {}, - "execution_count": null - }, 
- { - "cell_type": "markdown", - "source": [ - "Let's plot the orbit and check the energy and angular momentum variation. We know that energy and angular momentum should be constant, and they are also called first integrals." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot_orbit(sol) = plot(sol,vars=(3,4), lab=\"Orbit\", title=\"Kepler Problem Solution\")\n\nfunction plot_first_integrals(sol, H, L)\n plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab=\"Energy variation\", title=\"First Integrals\")\n plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab=\"Angular momentum variation\")\nend\nanalysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "analysis_plot(sol, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's try to use a Runge-Kutta-Nyström solver to solve this problem and check the first integrals' variation." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol2 = solve(prob, DPRKN6()) # dt is not necessary, because unlike symplectic\n # integrators DPRKN6 is adaptive\n@show sol2.u |> length\nanalysis_plot(sol2, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's then try to solve the same problem by the `ERKN4` solver, which is specialized for sinusoid-like periodic function" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic\n # integrators ERKN4 is adaptive\n@show sol3.u |> length\nanalysis_plot(sol3, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can see that `ERKN4` does a bad job for this problem, because this problem is not sinusoid-like.\n\nOne advantage of using `DynamicalODEProblem` is that it can implicitly convert the second order ODE problem to a *normal* system of first order ODEs, which is solvable for other ODE solvers. Let's use the `Tsit5` solver for the next example." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol4 = solve(prob, Tsit5())\n@show sol4.u |> length\nanalysis_plot(sol4, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Note\n\nThere is drifting for all the solutions, and high order methods are drifting less because they are more accurate.\n\n### Conclusion\n\n---\n\nSymplectic integrator does not conserve the energy completely at all time, but the energy can come back. In order to make sure that the energy fluctuation comes back eventually, symplectic integrator has to have a fixed time step. 
Despite the energy variation, symplectic integrator conserves the angular momentum perfectly.\n\nBoth Runge-Kutta-Nyström and Runge-Kutta integrator do not conserve energy nor the angular momentum, and the first integrals do not tend to come back. An advantage Runge-Kutta-Nyström integrator over symplectic integrator is that RKN integrator can have adaptivity. An advantage Runge-Kutta-Nyström integrator over Runge-Kutta integrator is that RKN integrator has less function evaluation per step. The `ERKN4` solver works best for sinusoid-like solutions.\n\n## Manifold Projection\n\nIn this example, we know that energy and angular momentum should be conserved. We can achieve this through mainfold projection. As the name implies, it is a procedure to project the ODE solution to a manifold. Let's start with a base case, where mainfold projection isn't being used." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqCallbacks\n\nplot_orbit2(sol) = plot(sol,vars=(1,2), lab=\"Orbit\", title=\"Kepler Problem Solution\")\n\nfunction plot_first_integrals2(sol, H, L)\n plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab=\"Energy variation\", title=\"First Integrals\")\n plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab=\"Angular momentum variation\")\nend\n\nanalysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L))\n\nfunction hamiltonian(du,u,params,t)\n q, p = u[1:2], u[3:4]\n qdot(@view(du[1:2]), p, q, params, t)\n pdot(@view(du[3:4]), p, q, params, t)\nend\n\nprob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan)\nsol_ = solve(prob2, RK4(), dt=1//5, adaptive=false)\nanalysis_plot2(sol_, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is a significant fluctuation in the first integrals, when there is no mainfold projection." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function first_integrals_manifold(residual,u)\n residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])\n residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4])\nend\n\ncb = ManifoldProjection(first_integrals_manifold)\nsol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb)\nanalysis_plot2(sol5, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can see that thanks to the manifold projection, the first integrals' variation is very small, although we are using `RK4` which is not symplectic. But wait, what if we only project to the energy conservation manifold?" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function energy_manifold(residual,u)\n residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])\n residual[3:4] .= 0\nend\nenergy_cb = ManifoldProjection(energy_manifold)\nsol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb)\nanalysis_plot2(sol6, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is almost no energy variation but angular momentum varies quite bit. How about only project to the angular momentum conservation manifold?" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function angular_manifold(residual,u)\n residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4])\n residual[3:4] .= 0\nend\nangular_cb = ManifoldProjection(angular_manifold)\nsol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb)\nanalysis_plot2(sol7, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Again, we see what we expect." 
- ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/outer_solar_system.ipynb b/notebook/models/outer_solar_system.ipynb deleted file mode 100644 index 9622ba11..00000000 --- a/notebook/models/outer_solar_system.ipynb +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "## Data\n\nThe chosen units are: masses relative to the sun, so that the sun has mass $1$. We have taken $m_0 = 1.00000597682$ to take account of the inner planets. Distances are in astronomical units , times in earth days, and the gravitational constant is thus $G = 2.95912208286 \\cdot 10^{−4}$.\n\n| planet | mass | initial position | initial velocity |\n| --- | --- | --- | --- |\n| Jupiter | $m_1 = 0.000954786104043$ | | \n| Saturn | $m_2 = 0.000285583733151$ | | \n| Uranus | $m_3 = 0.0000437273164546$ | | \n| Neptune | $m_4 = 0.0000517759138449$ | | \n| Pluto | $ m_5 = 1/(1.3 \\cdot 10^8 )$ | | \n\nThe data is taken from the book \"Geometric Numerical Integration\" by E. Hairer, C. Lubich and G. 
Wanner.\n# The Outer Solar System\n### Yingbo Ma, Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools\ngr()\n\nG = 2.95912208286e-4\nM = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8]\nplanets = [\"Sun\", \"Jupiter\", \"Saturn\", \"Uranus\", \"Neptune\", \"Pluto\"]\n\npos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357]\npos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594]\npos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382]\npos = ArrayPartition(pos_x,pos_y,pos_z)\n\nvel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725]\nvel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702]\nvel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504]\nvel = ArrayPartition(vel_x,vel_y,vel_z)\n\ntspan = (0.,200_000)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The N-body problem's Hamiltonian is\n\n$$H(p,q) = \\frac{1}{2}\\sum_{i=0}^{N}\\frac{p_{i}^{T}p_{i}}{m_{i}} - G\\sum_{i=1}^{N}\\sum_{j=0}^{i-1}\\frac{m_{i}m_{j}}{\\left\\lVert q_{i}-q_{j} \\right\\rVert}$$\n\nHere, we want to solve for the motion of the five outer planets relative to the sun, namely, Jupiter, Saturn, Uranus, Neptune and Pluto." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "const ∑ = sum\nconst N = 6\npotential(p, t, x, y, z, M) = -G*∑(i->∑(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Hamiltonian System\n\n`NBodyProblem` constructs a second order ODE problem under the hood. 
We know that a Hamiltonian system has the form of\n\n$$\\dot{p} = -H_{q}(p,q)\\quad \\dot{q}=H_{p}(p,q)$$\n\nFor an N-body system, we can symplify this as:\n\n$$\\dot{p} = -\\nabla{V}(q)\\quad \\dot{q}=M^{-1}p.$$\n\nThus $\\dot{q}$ is defined by the masses. We only need to define $\\dot{p}$, and this is done internally by taking the gradient of $V$. Therefore, we only need to pass the potential function and the rest is taken care of." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "nprob = NBodyProblem(potential, M, pos, vel, tspan)\nsol = solve(nprob,Yoshida6(), dt=100);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "orbitplot(sol,body_names=planets)" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/ode_extras/feagin.ipynb b/notebook/ode_extras/feagin.ipynb deleted file mode 100644 index d098966c..00000000 --- a/notebook/ode_extras/feagin.ipynb +++ /dev/null @@ -1,115 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "DifferentialEquations.jl includes Feagin's explicit Runge-Kutta methods of orders 10/8, 12/10, and 14/12. These methods have such high order that it's pretty much required that one uses numbers with more precision than Float64. As a prerequisite reference on how to use arbitrary number systems (including higher precision) in the numerical solvers, please see the Solving Equations in With Chosen Number Types notebook.\n\n## Investigation of the Method's Error\n\nWe can use Feagin's order 16 method as follows. Let's use a two-dimensional linear ODE. 
Like in the Solving Equations in With Chosen Number Types notebook, we change the initial condition to BigFloats to tell the solver to use BigFloat types.\n# Feagin's Order 10, 12, and 14 Methods\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nconst linear_bigα = big(1.01)\nf(u,p,t) = (linear_bigα*u)\n\n# Add analytical solution so that errors are checked\nf_analytic(u0,p,t) = u0*exp(linear_bigα*t)\nff = ODEFunction(f,analytic=f_analytic)\nprob = ODEProblem(ff,big(0.5),(0.0,1.0))\nsol = solve(prob,Feagin14(),dt=1//16,adaptive=false);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(sol.errors)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Compare that to machine $\\epsilon$ for Float64:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "eps(Float64)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The error for Feagin's method when the stepsize is 1/16 is 8 orders of magnitude below machine $\\epsilon$! However, that is dependent on the stepsize. If we instead use adaptive timestepping with the default tolerances, we get" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol =solve(prob,Feagin14());\nprintln(sol.errors); print(\"The length was $(length(sol))\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that when the stepsize is much higher, the error goes up quickly as well. These super high order methods are best when used to gain really accurate approximations (using still modest timesteps). 
Some examples of where such precision is necessary is astrodynamics where the many-body problem is highly chaotic and thus sensitive to small errors.\n\n## Convergence Test\n\nThe Order 14 method is awesome, but we need to make sure it's really that awesome. The following convergence test is used in the package tests in order to make sure the implementation is correct. Note that all methods have such tests in place." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqDevTools\ndts = 1.0 ./ 2.0 .^(10:-1:4)\nsim = test_convergence(dts,prob,Feagin14())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "For a view of what's going on, let's plot the simulation results." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots\ngr()\nplot(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is a clear trend indicating that the convergence is truly Order 14, which\nis the estimated slope." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/ode_extras/monte_carlo_parameter_estim.ipynb b/notebook/ode_extras/monte_carlo_parameter_estim.ipynb deleted file mode 100644 index 23bddf19..00000000 --- a/notebook/ode_extras/monte_carlo_parameter_estim.ipynb +++ /dev/null @@ -1,204 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "First you want to create a problem which solves multiple problems at the same time. This is the Monte Carlo Problem. 
When the parameter estimation tools say it will take any DEProblem, it really means ANY DEProblem!\n\nSo, let's get a Monte Carlo problem setup that solves with 10 different initial conditions.\n# Monte Carlo Parameter Estimation From Data\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, DiffEqParamEstim, Plots, Optim\n\n# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions\n\n# Set up Lotka-Volterra system\nfunction pf_func(du,u,p,t)\n du[1] = p[1] * u[1] - p[2] * u[1]*u[2]\n du[2] = -3 * u[2] + u[1]*u[2]\nend\np = [1.5,1.0]\nprob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now for a MonteCarloProblem we have to take this problem and tell it what to do N times via the prob_func. So let's generate N=10 different initial conditions, and tell it to run the same problem but with these 10 different initial conditions each time:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Setting up to solve the problem N times (for the N different initial conditions)\nN = 10;\ninitial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]]\nfunction prob_func(prob,i,repeat)\n ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)\nend\nmonte_prob = MonteCarloProblem(prob,prob_func=prob_func)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can check this does what we want by solving it:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Check above does what we want\nsim = solve(monte_prob,Tsit5(),num_monte=N)\nplot(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "num_monte=N means \"run N 
times\", and each time it runs the problem returned by the prob_func, which is always the same problem but with the ith initial condition.\n\nNow let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Generate a dataset from these runs\ndata_times = 0.0:0.1:10.0\nsim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)\ndata = Array(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here, data[i,j,k] is the same as sim[i,j,k] which is the same as sim[k][i,j] (where sim[k] is the kth solution). So data[i,j,k] is the jth timepoint of the ith variable in the kth trajectory.\n\nNow let's build a loss function. A loss function is some loss(sol) that spits out a scalar for how far from optimal we are. In the documentation I show that we normally do loss = L2Loss(t,data), but we can bootstrap off of this. Instead lets build an array of N loss functions, each one with the correct piece of data." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Building a loss function\nlosses = [L2Loss(data_times,data[:,:,i]) for i in 1:N]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "So losses[i] is a function which computes the loss of a solution against the data of the ith trajectory. So to build our true loss function, we sum the losses:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "loss(sim) = sum(losses[i](sim[i]) for i in 1:N)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "As a double check, make sure that loss(sim) outputs zero (since we generated the data from sim). 
Now we generate data with other parameters:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8])\nfunction prob_func(prob,i,repeat)\n ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)\nend\nmonte_prob = MonteCarloProblem(prob,prob_func=prob_func)\nsim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)\nloss(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and get a non-zero loss. So we now have our problem, our data, and our loss function... we have what we need.\n\nPut this into build_loss_objective." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,\n saveat=data_times)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that I added the kwargs for solve into this. They get passed to an internal solve command, so then the loss is computed on N trajectories at data_times.\n\nThus we take this objective function over to any optimization package. I like to do quick things in Optim.jl. Here, since the Lotka-Volterra equation requires positive parameters, I use Fminbox to make sure the parameters stay positive. I start the optimization with [1.3,0.9], and Optim spits out that the true parameters are:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "lower = zeros(2)\nupper = fill(2.0,2)\nresult = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "result" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Optim finds one but not the other parameter.\n\nI would run a test on synthetic data for your problem before using it on real data. 
Maybe play around with different optimization packages, or add regularization. You may also want to decrease the tolerance of the ODE solvers via" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,\n abstol=1e-8,reltol=1e-8,\n saveat=data_times)\nresult = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "result" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "if you suspect error is the problem. However, if you're having problems it's most likely not the ODE solver tolerance and mostly because parameter inference is a very hard optimization problem." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/ode_extras/ode_minmax.ipynb b/notebook/ode_extras/ode_minmax.ipynb deleted file mode 100644 index f27c0532..00000000 --- a/notebook/ode_extras/ode_minmax.ipynb +++ /dev/null @@ -1,197 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "### Setup\n\nIn this tutorial we will show how to use Optim.jl to find the maxima and minima of solutions. 
Let's take a look at the double pendulum:\n# Finding Maxima and Minima of DiffEq Solutions\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Constants and setup\nusing OrdinaryDiffEq\ninitial = [0.01, 0.01, 0.01, 0.01]\ntspan = (0.,100.)\n\n#Define the problem\nfunction double_pendulum_hamiltonian(udot,u,p,t)\n α = u[1]\n lα = u[2]\n β = u[3]\n lβ = u[4]\n udot .=\n [2(lα-(1+cos(β))lβ)/(3-cos(2β)),\n -2sin(α) - sin(α+β),\n 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)),\n -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)]\nend\n\n#Pass to solvers\npoincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(poincare, Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In time, the solution looks like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots; gr()\nplot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "while it has the well-known phase-space plot:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol, vars=(3,4), leg=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Local Optimization\n\nLet's fine out what some of the local maxima and minima are. Optim.jl can be used to minimize functions, and the solution type has a continuous interpolation which can be used. Let's look for the local optima for the 4th variable around `t=20`. 
Thus our optimization function is:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (t) -> sol(t,idxs=4)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "`first(t)` is the same as `t[1]` which transforms the array of size 1 into a number. `idxs=4` is the same as `sol(first(t))[4]` but does the calculation without a temporary array and thus is faster. To find a local minima, we can simply call Optim on this function. Let's find a local minimum:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Optim\nopt = optimize(f,18.0,22.0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "From this printout we see that the minimum is at `t=18.63` and the value is `-2.79e-2`. We can get these in code-form via:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(opt.minimizer)\nprintln(opt.minimum)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To get the maximum, we just minimize the negative of the function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (t) -> -sol(first(t),idxs=4)\nopt2 = optimize(f,0.0,22.0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's add the maxima and minima to the plots:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol, vars=(0,4), plotdensity=10000)\nscatter!([opt.minimizer],[opt.minimum],label=\"Local Min\")\nscatter!([opt2.minimizer],[-opt2.minimum],label=\"Local Max\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Brent's method will locally minimize over the full interval. If we instead want a local maxima nearest to a point, we can use `BFGS()`. 
In this case, we need to optimize a vector `[t]`, and thus dereference it to a number using `first(t)`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (t) -> -sol(first(t),idxs=4)\nopt = optimize(f,[20.0],BFGS())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Global Optimization\n\nIf we instead want to find global maxima and minima, we need to look somewhere else. For this there are many choices. A pure Julia option is BlackBoxOptim.jl, but I will use NLopt.jl. Following the NLopt.jl tutorial but replacing their function with out own:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "import NLopt, ForwardDiff\n\ncount = 0 # keep track of # function evaluations\n\nfunction g(t::Vector, grad::Vector)\n if length(grad) > 0\n #use ForwardDiff for the gradients\n grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t)\n end\n sol(first(t),idxs=4)\nend\nopt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1)\nNLopt.lower_bounds!(opt, [0.0])\nNLopt.upper_bounds!(opt, [40.0])\nNLopt.xtol_rel!(opt,1e-8)\nNLopt.min_objective!(opt, g)\n(minf,minx,ret) = NLopt.optimize(opt,[20.0])\nprintln(minf,\" \",minx,\" \",ret)\nNLopt.max_objective!(opt, g)\n(maxf,maxx,ret) = NLopt.optimize(opt,[20.0])\nprintln(maxf,\" \",maxx,\" \",ret)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol, vars=(0,4), plotdensity=10000)\nscatter!([minx],[minf],label=\"Global Min\")\nscatter!([maxx],[maxf],label=\"Global Max\")" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/test.ipynb 
b/notebook/test.ipynb deleted file mode 100644 index b1266b71..00000000 --- a/notebook/test.ipynb +++ /dev/null @@ -1,35 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "This is a test of the builder system.\n# Test\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqTutorials\nDiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/type_handling/number_types.ipynb b/notebook/type_handling/number_types.ipynb deleted file mode 100644 index 08747b07..00000000 --- a/notebook/type_handling/number_types.ipynb +++ /dev/null @@ -1,170 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "One of the nice things about DifferentialEquations.jl is that it is designed with Julia's type system in mind. What this means is, if you have properly defined a Number type, you can use this number type in DifferentialEquations.jl's algorithms! [Note that this is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as ODE.jl, Sundials.jl, and ODEInterface.jl are not compatible with some number systems.]\n\nDifferentialEquations.jl determines the numbers to use in its solvers via the types that are designated by `tspan` and the initial condition of the problem. It will keep the time values in the same type as tspan, and the solution values in the same type as the initial condition. [Note that adaptive timestepping requires that the time type is compaible with `sqrt` and `^` functions. 
Thus dt cannot be Integer or numbers like that if adaptive timestepping is chosen].\n\nLet's solve the linear ODE first define an easy way to get ODEProblems for the linear ODE:\n# Solving Equations in With Julia-Defined Types\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nf = (u,p,t) -> (p*u)\nprob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "First let's solve it using Float64s. To do so, we just need to set u0 to a Float64 (which is done by the default) and dt should be a float as well." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = prob_ode_linear\nsol =solve(prob,Tsit5())\nprintln(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that both the times and the solutions were saved as Float64. Let's change the time to use rational values. Rationals are not compatible with adaptive time stepping since they do not have an L2 norm (this can be worked around by defining `internalnorm`, but rationals already explode in size!). To account for this, let's turn off adaptivity as well:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,1/2,(0//1,1//1),101//100);\nsol = solve(prob,RK4(),dt=1//2^(6),adaptive=false)\nprintln(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's do something fun. Let's change the solution to use `Rational{BigInt}` and print out the value at the end of the simulation. To do so, simply change the definition of the initial condition." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100);\nsol =solve(prob,RK4(),dt=1//2^(6),adaptive=false)\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That's one huge fraction!\n\n## Other Compatible Number Types\n\n#### BigFloats" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01))\nsol =solve(prob_ode_biglinear,Tsit5())\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### DoubleFloats.jl\n\nThere's are Float128-like types. Higher precision, but fixed and faster than arbitrary precision." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DoubleFloats\nprob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01))\nsol =solve(prob_ode_doublelinear,Tsit5())\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### ArbFloats\n\nThese high precision numbers which are much faster than Bigs for less than 500-800 bits of accuracy." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using ArbNumerics\nprob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01))\nsol =solve(prob_ode_arbfloatlinear,Tsit5())\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Incompatible Number Systems\n\n#### DecFP.jl\n\nNext let's try DecFP. DecFP is a fixed-precision decimals library which is made to give both performance but known decimals of accuracy. 
Having already installed DecFP with `]add DecFP`, I can run the following:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DecFP\nprob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01))\nsol =solve(prob_ode_decfplinear,Tsit5())\nprintln(sol[end]); println(typeof(sol[end]))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Decimals.jl\n\nInstall with `]add Decimals`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Decimals\nprob_ode_decimallinear = ODEProblem(f,[decimal(\"1.0\")]./[decimal(\"2.0\")],(0//1,1//1),decimal(1.01))\nsol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails\nprintln(sol[end]); println(typeof(sol[end]))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "At the time of writing this, Decimals are not compatible. This is not on DifferentialEquations.jl's end, it's on partly on Decimal's end since it is not a subtype of Number. Thus it's not recommended you use Decimals with DifferentialEquations.jl\n\n## Conclusion\n\nAs you can see, DifferentialEquations.jl can use arbitrary Julia-defined number systems in its arithmetic. If you need 128-bit floats, i.e. a bit more precision but not arbitrary, DoubleFloats.jl is a very good choice! For arbitrary precision, ArbNumerics are the most feature-complete and give great performance compared to BigFloats, and thus I recommend their use when high-precision (less than 512-800 bits) is required. DecFP is a great library for high-performance decimal numbers and works well as well. Other number systems could use some modernization." 
- ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/type_handling/uncertainties.ipynb b/notebook/type_handling/uncertainties.ipynb deleted file mode 100644 index 3d9822e6..00000000 --- a/notebook/type_handling/uncertainties.ipynb +++ /dev/null @@ -1,174 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "The result of a measurement should be given as a number with an attached uncertainties, besides the physical unit, and all operations performed involving the result of the measurement should propagate the uncertainty, taking care of correlation between quantities.\n\nThere is a Julia package for dealing with numbers with uncertainties: [`Measurements.jl`](https://github.com/JuliaPhysics/Measurements.jl). 
Thanks to Julia's features, `DifferentialEquations.jl` easily works together with `Measurements.jl` out-of-the-box.\n\nThis notebook will cover some of the examples from the tutorial about classical Physics.\n\n## Caveat about `Measurement` type\n\nBefore going on with the tutorial, we must point up a subtlety of `Measurements.jl` that you should be aware of:\n# Numbers with Uncertainties\n### Mosè Giordano, Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Measurements\n\n5.23 ± 0.14 === 5.23 ± 0.14" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "(5.23± 0.14) - (5.23 ± 0.14)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "(5.23 ± 0.14) / (5.23 ± 0.14)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The two numbers above, even though have the same nominal value and the same uncertainties, are actually two different measurements that only by chance share the same figures and their difference and their ratio have a non-zero uncertainty. 
It is common in physics to get very similar, or even equal, results for a repeated measurement, but the two measurements are not the same thing.\n\nInstead, if you have *one measurement* and want to perform some operations involving it, you have to assign it to a variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "x = 5.23 ± 0.14\nx === x" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "x - x" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "x / x" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Radioactive Decay of Carbon-14\n\nThe rate of decay of carbon-14 is governed by a first order linear ordinary differential equation\n\n$$\\frac{\\mathrm{d}u(t)}{\\mathrm{d}t} = -\\frac{u(t)}{\\tau}$$\n\nwhere $\\tau$ is the mean lifetime of carbon-14, which is related to the half-life $t_{1/2} = (5730 \\pm 40)$ years by the relation $\\tau = t_{1/2}/\\ln(2)$." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Measurements, Plots\n\n# Half-life and mean lifetime of radiocarbon, in years\nt_12 = 5730 ± 40\nτ = t_12 / log(2)\n\n#Setup\nu₀ = 1 ± 0\ntspan = (0.0, 10000.0)\n\n#Define the problem\nradioactivedecay(u,p,t) = - u / τ\n\n#Pass to solver\nprob = ODEProblem(radioactivedecay, u₀, tspan)\nsol = solve(prob, Tsit5(), reltol = 1e-8)\n\n# Analytic solution\nu = exp.(- sol.t / τ)\n\nplot(sol.t, sol.u, label = \"Numerical\", xlabel = \"Years\", ylabel = \"Fraction of Carbon-14\")\nplot!(sol.t, u, label = \"Analytic\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The two curves are perfectly superimposed, indicating that the numerical solution matches the analytic one. 
We can check that also the uncertainties are correctly propagated in the numerical solution:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(\"Quantity of carbon-14 after \", sol.t[11], \" years:\")\nprintln(\"Numerical: \", sol[11])\nprintln(\"Analytic: \", u[11])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Both the value of the numerical solution and its uncertainty match the analytic solution within the requested tolerance. We can also note that close to 5730 years after the beginning of the decay (half-life of the radioisotope), the fraction of carbon-14 that survived is about 0.5.\n\n## Simple pendulum\n\n### Small angles approximation\n\nThe next problem we are going to study is the simple pendulum in the approximation of small angles. We address this simplified case because there exists an easy analytic solution to compare.\n\nThe differential equation we want to solve is\n\n$$\\ddot{\\theta} + \\frac{g}{L} \\theta = 0$$\n\nwhere $g = (9.79 \\pm 0.02)~\\mathrm{m}/\\mathrm{s}^2$ is the gravitational acceleration measured where the experiment is carried out, and $L = (1.00 \\pm 0.01)~\\mathrm{m}$ is the length of the pendulum.\n\nWhen you set up the problem for `DifferentialEquations.jl` remember to define the measurements as variables, as seen above." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Measurements, Plots\n\ng = 9.79 ± 0.02; # Gravitational constants\nL = 1.00 ± 0.01; # Length of the pendulum\n\n#Initial Conditions\nu₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle\ntspan = (0.0, 6.3)\n\n#Define the problem\nfunction simplependulum(du,u,p,t)\n θ = u[1]\n dθ = u[2]\n du[1] = dθ\n du[2] = -(g/L)*θ\nend\n\n#Pass to solvers\nprob = ODEProblem(simplependulum, u₀, tspan)\nsol = solve(prob, Tsit5(), reltol = 1e-6)\n\n# Analytic solution\nu = u₀[2] .* cos.(sqrt(g / L) .* sol.t)\n\nplot(sol.t, getindex.(sol.u, 2), label = \"Numerical\")\nplot!(sol.t, u, label = \"Analytic\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Also in this case there is a perfect superimposition between the two curves, including their uncertainties.\n\nWe can also have a look at the difference between the two solutions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol.t, getindex.(sol.u, 2) .- u, label = \"\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Arbitrary amplitude\n\nNow that we know how to solve differential equations involving numbers with uncertainties we can solve the simple pendulum problem without any approximation. 
This time the differential equation to solve is the following:\n\n$$\\ddot{\\theta} + \\frac{g}{L} \\sin(\\theta) = 0$$" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "g = 9.79 ± 0.02; # Gravitational constants\nL = 1.00 ± 0.01; # Length of the pendulum\n\n#Initial Conditions\nu₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle\ntspan = (0.0, 6.3)\n\n#Define the problem\nfunction simplependulum(du,u,p,t)\n θ = u[1]\n dθ = u[2]\n du[1] = dθ\n du[2] = -(g/L) * sin(θ)\nend\n\n#Pass to solvers\nprob = ODEProblem(simplependulum, u₀, tspan)\nsol = solve(prob, Tsit5(), reltol = 1e-6)\n\nplot(sol.t, getindex.(sol.u, 2), label = \"Numerical\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We note that in this case the period of the oscillations is not constant." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/type_handling/unitful.ipynb b/notebook/type_handling/unitful.ipynb deleted file mode 100644 index 3f6fdd8b..00000000 --- a/notebook/type_handling/unitful.ipynb +++ /dev/null @@ -1,163 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Unit Checked Arithmetic via Unitful.jl\n### Chris Rackauckas\n\nUnits and dimensional analysis are standard tools across the sciences for checking the correctness of your equation. 
However, most ODE solvers only allow for the equation to be in dimensionless form, leaving it up to the user to both convert the equation to a dimensionless form, punch in the equations, and hopefully not make an error along the way.\n\nDifferentialEquations.jl allows for one to use Unitful.jl to have unit-checked arithmetic natively in the solvers. Given the dispatch implementation of the Unitful, this has little overhead.\n\n## Using Unitful\n\nTo use Unitful, you need to have the package installed. Then you can add units to your variables. For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Unitful\nt = 1.0u\"s\"" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that `t` is a variable with units in seconds. If we make another value with seconds, they can add" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t2 = 1.02u\"s\"\nt+t2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and they can multiply:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t*t2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "You can even do rational roots:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sqrt(t)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Many operations work. 
These operations will check to make sure units are correct, and will throw an error for incorrect operations:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t + sqrt(t)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Using Unitful with DifferentialEquations.jl\n\nJust like with other number systems, you can choose the units for your numbers by simply specifying the units of the initial condition and the timestep. For example, to solve the linear ODE where the variable has units of Newton's and `t` is in Seconds, we would use:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nf = (y,p,t) -> 0.5*y\nu0 = 1.5u\"N\"\nprob = ODEProblem(f,u0,(0.0u\"s\",1.0u\"s\"))\nsol = solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we recieved a unit mismatch error. This is correctly so! Remember that for an ODE:\n\n$$\\frac{dy}{dt} = f(t,y)$$\n\nwe must have that `f` is a rate, i.e. `f` is a change in `y` per unit time. So we need to fix the units of `f` in our example to be `N/s`. Notice that we then do not receive an error if we do the following:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (y,p,t) -> 0.5*y/3.0u\"s\"\nprob = ODEProblem(f,u0,(0.0u\"s\",1.0u\"s\"))\nsol = solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This gives a a normal solution object. 
Notice that the values are all with the correct units:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "print(sol[:])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can plot the solution by removing the units:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots\ngr()\nplot(ustrip(sol.t),ustrip(sol[:]),lw=3)" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/pdf/advanced/beeler_reuter.pdf b/pdf/advanced/beeler_reuter.pdf deleted file mode 100644 index b8e9c7d4..00000000 Binary files a/pdf/advanced/beeler_reuter.pdf and /dev/null differ diff --git a/pdf/introduction/callbacks_and_events.pdf b/pdf/introduction/callbacks_and_events.pdf deleted file mode 100644 index 6cb1d5ce..00000000 Binary files a/pdf/introduction/callbacks_and_events.pdf and /dev/null differ diff --git a/pdf/introduction/choosing_algs.pdf b/pdf/introduction/choosing_algs.pdf deleted file mode 100644 index af2413ce..00000000 Binary files a/pdf/introduction/choosing_algs.pdf and /dev/null differ diff --git a/pdf/introduction/formatting_plots.pdf b/pdf/introduction/formatting_plots.pdf deleted file mode 100644 index c5e4d480..00000000 Binary files a/pdf/introduction/formatting_plots.pdf and /dev/null differ diff --git a/pdf/introduction/ode_introduction.pdf b/pdf/introduction/ode_introduction.pdf deleted file mode 100644 index e689a2d3..00000000 Binary files a/pdf/introduction/ode_introduction.pdf and /dev/null differ diff --git a/pdf/introduction/optimizing_diffeq_code.pdf b/pdf/introduction/optimizing_diffeq_code.pdf deleted file mode 100644 index 
c48c4633..00000000 Binary files a/pdf/introduction/optimizing_diffeq_code.pdf and /dev/null differ diff --git a/pdf/models/classical_physics.pdf b/pdf/models/classical_physics.pdf deleted file mode 100644 index d442ce3f..00000000 Binary files a/pdf/models/classical_physics.pdf and /dev/null differ diff --git a/pdf/models/conditional_dosing.pdf b/pdf/models/conditional_dosing.pdf deleted file mode 100644 index 3b33cf38..00000000 Binary files a/pdf/models/conditional_dosing.pdf and /dev/null differ diff --git a/pdf/models/diffeqbio_II_networkproperties.pdf b/pdf/models/diffeqbio_II_networkproperties.pdf deleted file mode 100644 index 8e9f7058..00000000 Binary files a/pdf/models/diffeqbio_II_networkproperties.pdf and /dev/null differ diff --git a/pdf/models/diffeqbio_I_introduction.pdf b/pdf/models/diffeqbio_I_introduction.pdf deleted file mode 100644 index 9378ff9d..00000000 Binary files a/pdf/models/diffeqbio_I_introduction.pdf and /dev/null differ diff --git a/pdf/models/kepler_problem.pdf b/pdf/models/kepler_problem.pdf deleted file mode 100644 index aee7ff0f..00000000 Binary files a/pdf/models/kepler_problem.pdf and /dev/null differ diff --git a/pdf/ode_extras/feagin.pdf b/pdf/ode_extras/feagin.pdf deleted file mode 100644 index 6b61dba9..00000000 Binary files a/pdf/ode_extras/feagin.pdf and /dev/null differ diff --git a/pdf/ode_extras/monte_carlo_parameter_estim.pdf b/pdf/ode_extras/monte_carlo_parameter_estim.pdf deleted file mode 100644 index 4971a1f1..00000000 Binary files a/pdf/ode_extras/monte_carlo_parameter_estim.pdf and /dev/null differ diff --git a/pdf/ode_extras/ode_minmax.pdf b/pdf/ode_extras/ode_minmax.pdf deleted file mode 100644 index 57f1cf03..00000000 Binary files a/pdf/ode_extras/ode_minmax.pdf and /dev/null differ diff --git a/pdf/test.pdf b/pdf/test.pdf deleted file mode 100644 index 7a76a68f..00000000 Binary files a/pdf/test.pdf and /dev/null differ diff --git a/pdf/type_handling/number_types.pdf b/pdf/type_handling/number_types.pdf deleted 
file mode 100644 index bfc1eeb4..00000000 Binary files a/pdf/type_handling/number_types.pdf and /dev/null differ diff --git a/pdf/type_handling/uncertainties.pdf b/pdf/type_handling/uncertainties.pdf deleted file mode 100644 index 5801f249..00000000 Binary files a/pdf/type_handling/uncertainties.pdf and /dev/null differ diff --git a/pdf/type_handling/unitful.pdf b/pdf/type_handling/unitful.pdf deleted file mode 100644 index f62c66ea..00000000 Binary files a/pdf/type_handling/unitful.pdf and /dev/null differ diff --git a/script/advanced/beeler_reuter.jl b/script/advanced/beeler_reuter.jl deleted file mode 100644 index e855ea24..00000000 --- a/script/advanced/beeler_reuter.jl +++ /dev/null @@ -1,455 +0,0 @@ - -const v0 = -84.624 -const v1 = 10.0 -const C_K1 = 1.0f0 -const C_x1 = 1.0f0 -const C_Na = 1.0f0 -const C_s = 1.0f0 -const D_Ca = 0.0f0 -const D_Na = 0.0f0 -const g_s = 0.09f0 -const g_Na = 4.0f0 -const g_NaC = 0.005f0 -const ENa = 50.0f0 + D_Na -const γ = 0.5f0 -const C_m = 1.0f0 - - -mutable struct BeelerReuterCpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - C::Array{Float32, 2} # intracellular calcium concentration - M::Array{Float32, 2} # sodium current activation gate (m) - H::Array{Float32, 2} # sodium current inactivation gate (h) - J::Array{Float32, 2} # sodium current slow inactivaiton gate (j) - D::Array{Float32, 2} # calcium current activaiton gate (d) - F::Array{Float32, 2} # calcium current inactivation gate (f) - XI::Array{Float32, 2} # inward-rectifying potassium current (iK1) - - Δu::Array{Float64, 2} # place-holder for the Laplacian - - function BeelerReuterCpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.C = fill(0.0001f0, (ny,nx)) - self.M = fill(0.01f0, (ny,nx)) - self.H = fill(0.988f0, (ny,nx)) - self.J = fill(0.975f0, (ny,nx)) - self.D = fill(0.003f0, (ny,nx)) - self.F = fill(0.994f0, 
(ny,nx)) - self.XI = fill(0.0001f0, (ny,nx)) - - self.Δu = zeros(ny,nx) - - return self - end -end - - -# 5-point stencil -function laplacian(Δu, u) - n1, n2 = size(u) - - # internal nodes - for j = 2:n2-1 - for i = 2:n1-1 - @inbounds Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j] - end - end - - # left/right edges - for i = 2:n1-1 - @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1] - @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2] - end - - # top/bottom edges - for j = 2:n2-1 - @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j] - @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j] - end - - # corners - @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1] - @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1] - @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2] - @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2] -end - - -@inline function rush_larsen(g, α, β, Δt) - inf = α/(α+β) - τ = 1f0 / (α+β) - return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0) -end - - -function update_M_cpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 
10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * exp(-0.056f0*(v+72.0f0))) - return rush_larsen(g, α, β, Δt) -end - -function update_H_cpu(g, v, Δt) - α = 0.126f0 * exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_J_cpu(g, v, Δt) - α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_D_cpu(g, v, Δt) - α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_F_cpu(g, v, Δt) - α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_XI_cpu(g, v, Δt) - α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - - -function update_C_cpu(g, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - g) - τ = 1f0 / 0.07f0 - return g + (g - inf) * expm1(-Δt/τ) -end - - -function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt) - let Δt = Float32(Δt) - n1, n2 = size(u) - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - XI[i,j] = update_XI_cpu(XI[i,j], v, Δt) - M[i,j] = update_M_cpu(M[i,j], v, Δt) - H[i,j] = update_H_cpu(H[i,j], v, Δt) - J[i,j] = update_J_cpu(J[i,j], v, Δt) - D[i,j] = update_D_cpu(D[i,j], v, Δt) - F[i,j] = update_F_cpu(F[i,j], v, Δt) - - C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - end - end -end - - -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = 
exp(0.04f0*(v+85f0)) - eb = exp(0.08f0*(v+53f0)) - ec = exp(0.04f0*(v+53f0)) - ed = exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = exp(0.04f0*(v+77f0)) - eb = exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end - -function update_du_cpu(du, u, XI, M, H, J, D, F, C) - n1, n2 = size(u) - - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - end - end -end - - -function (f::BeelerReuterCpu)(du, u, p, t) - Δt = t - f.t - - if Δt != 0 || t == 0 - update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt) - f.t = t - end - - laplacian(f.Δu, u) - - # calculate the reaction portion - update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δu -end - - -const N = 192; -u0 = fill(v0, (N, N)); -u0[90:102,90:102] .= v1; # a small square in the middle of the domain - - -using Plots -heatmap(u0) - - -using DifferentialEquations, Sundials - -deriv_cpu = BeelerReuterCpu(u0, 1.0); -prob = ODEProblem(deriv_cpu, u0, (0.0, 50.0)); - - -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); - - -heatmap(sol.u[end]) - - -using CUDAnative, CuArrays - -mutable struct 
BeelerReuterGpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - d_C::CuArray{Float32, 2} # intracellular calcium concentration - d_M::CuArray{Float32, 2} # sodium current activation gate (m) - d_H::CuArray{Float32, 2} # sodium current inactivation gate (h) - d_J::CuArray{Float32, 2} # sodium current slow inactivaiton gate (j) - d_D::CuArray{Float32, 2} # calcium current activaiton gate (d) - d_F::CuArray{Float32, 2} # calcium current inactivation gate (f) - d_XI::CuArray{Float32, 2} # inward-rectifying potassium current (iK1) - - d_u::CuArray{Float64, 2} # place-holder for u in the device memory - d_du::CuArray{Float64, 2} # place-holder for d_u in the device memory - - Δv::Array{Float64, 2} # place-holder for voltage gradient - - function BeelerReuterGpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - @assert (nx % 16 == 0) && (ny % 16 == 0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.d_C = CuArray(fill(0.0001f0, (ny,nx))) - self.d_M = CuArray(fill(0.01f0, (ny,nx))) - self.d_H = CuArray(fill(0.988f0, (ny,nx))) - self.d_J = CuArray(fill(0.975f0, (ny,nx))) - self.d_D = CuArray(fill(0.003f0, (ny,nx))) - self.d_F = CuArray(fill(0.994f0, (ny,nx))) - self.d_XI = CuArray(fill(0.0001f0, (ny,nx))) - - self.d_u = CuArray(u0) - self.d_du = CuArray(zeros(ny,nx)) - - self.Δv = zeros(ny,nx) - - return self - end -end - - -function rush_larsen_gpu(g, α, β, Δt) - inf = α/(α+β) - τ = 1.0/(α+β) - return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0) -end - -function update_M_gpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 
10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0))) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_H_gpu(g, v, Δt) - α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_J_gpu(g, v, Δt) - α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_D_gpu(g, v, Δt) - α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_F_gpu(g, v, Δt) - α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_XI_gpu(g, v, Δt) - α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_C_gpu(c, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - c) - τ = 1f0 / 0.07f0 - return c + (c - inf) * CUDAnative.expm1(-Δt/τ) -end - - -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = CUDAnative.exp(0.04f0*(v+85f0)) - eb = CUDAnative.exp(0.08f0*(v+53f0)) - ec = CUDAnative.exp(0.04f0*(v+53f0)) - ed = CUDAnative.exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 
25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = CUDAnative.exp(0.04f0*(v+77f0)) - eb = CUDAnative.exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end - - -function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - let Δt = Float32(Δt) - XI[i,j] = update_XI_gpu(XI[i,j], v, Δt) - M[i,j] = update_M_gpu(M[i,j], v, Δt) - H[i,j] = update_H_gpu(H[i,j], v, Δt) - J[i,j] = update_J_gpu(J[i,j], v, Δt) - D[i,j] = update_D_gpu(D[i,j], v, Δt) - F[i,j] = update_F_gpu(F[i,j], v, Δt) - - C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - nothing -end - -function update_du_gpu(du, u, XI, M, H, J, D, F, C) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - nothing -end - - -function (f::BeelerReuterGpu)(du, u, p, t) - L = 16 # block size - Δt = t - f.t - copyto!(f.d_u, u) - ny, nx = size(u) - - if Δt != 0 || t == 0 - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu( - f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt) - f.t = t - end - - laplacian(f.Δv, u) 
- - # calculate the reaction portion - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu( - f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C) - - copyto!(du, f.d_du) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δv -end - - -using DifferentialEquations, Sundials - -deriv_gpu = BeelerReuterGpu(u0, 1.0); -prob = ODEProblem(deriv_gpu, u0, (0.0, 50.0)); -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); - - -heatmap(sol.u[end]) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/callbacks_and_events.jl b/script/introduction/callbacks_and_events.jl deleted file mode 100644 index d479d7a1..00000000 --- a/script/introduction/callbacks_and_events.jl +++ /dev/null @@ -1,164 +0,0 @@ - -using DifferentialEquations, ParameterizedFunctions -ball! = @ode_def BallBounce begin - dy = v - dv = -g -end g - - -function condition(u,t,integrator) - u[1] -end - - -function affect!(integrator) - integrator.u[2] = -integrator.p[2] * integrator.u[2] -end - - -bounce_cb = ContinuousCallback(condition,affect!) - - -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb) - - -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) - - -function condition_kick(u,t,integrator) - t == 2 -end - - -function affect_kick!(integrator) - integrator.u[2] += 50 -end - - -kick_cb = DiscreteCallback(condition_kick,affect_kick!) -u0 = [50.0,0.0] -tspan = (0.0,10.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb) - - -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) - - -cb = CallbackSet(bounce_cb,kick_cb) - - -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=cb) -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) - - -u0 = [1.,0.] -harmonic! 
= @ode_def HarmonicOscillator begin - dv = -x - dx = v -end -tspan = (0.0,10.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -plot(sol) - - -function terminate_affect!(integrator) - terminate!(integrator) -end - - -function terminate_condition(u,t,integrator) - u[2] -end -terminate_cb = ContinuousCallback(terminate_condition,terminate_affect!) - - -sol = solve(prob,callback=terminate_cb) -plot(sol) - - -sol.t[end] - - -terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing) - - -sol = solve(prob,callback=terminate_upcrossing_cb) -plot(sol) - - -tspan = (0.0,10000.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -gr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points! -plot(sol,vars=(1,2)) - - -plot(sol,vars=(0,1),denseplot=false) - - -plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2 - - -function g(resid,u,p,t) - resid[1] = u[2]^2 + u[1]^2 - 1 - resid[2] = 0 -end - - -cb = ManifoldProjection(g) -sol = solve(prob,callback=cb) -plot(sol,vars=(1,2)) - - -plot(sol,vars=(0,1),denseplot=false) - - -u1,u2 = sol[500] -u2^2 + u1^2 - - -prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0)) - - -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) - - -using LinearAlgebra -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values) - - -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving - - -saved_values.t - - -saved_values.saveval - - -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0) -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving - - -saved_values.t - - -saved_values.saveval - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git 
a/script/introduction/choosing_algs.jl b/script/introduction/choosing_algs.jl deleted file mode 100644 index b6701eff..00000000 --- a/script/introduction/choosing_algs.jl +++ /dev/null @@ -1,49 +0,0 @@ - -using DifferentialEquations, ParameterizedFunctions -van! = @ode_def VanDerPol begin - dy = μ*((1-x^2)*y - x) - dx = 1*y -end μ - -prob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6) - - -sol = solve(prob,Tsit5()) - - -sol = solve(prob,alg_hints = [:stiff]) - - -sol = solve(prob) - - -using Plots; gr() -sol = solve(prob,alg_hints = [:stiff],reltol=1e-6) -plot(sol,denseplot=false) - - -plot(sol,ylims = (-10.0,10.0)) - - -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -u0 = [1.0,0.0,0.0] -p = (10,28,8/3) -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) - - -using BenchmarkTools -@btime solve(prob); - - -@btime solve(prob,alg_hints = [:stiff]); - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/formatting_plots.jl b/script/introduction/formatting_plots.jl deleted file mode 100644 index afc82412..00000000 --- a/script/introduction/formatting_plots.jl +++ /dev/null @@ -1,57 +0,0 @@ - -using DifferentialEquations, Plots, ParameterizedFunctions -gr() -lorenz = @ode_def Lorenz begin - dx = σ*(y-x) - dy = ρ*x-y-x*z - dz = x*y-β*z -end σ β ρ - -p = [10.0,8/3,28] -u0 = [1., 5., 10.] -tspan = (0., 100.) 
-prob = ODEProblem(lorenz, u0, tspan, p) -sol = solve(prob) - - -plot(sol) - - -plot(sol,vars=(:x,:y,:z)) - - -plot(sol,vars=[:x]) - - -plot(sol,vars=(1,2,3)) -plot(sol,vars=[1]) - - -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", -xaxis="Time (t)",yaxis="u(t) (in mm)",label=["X","Y","Z"]) - - -scatter(sol,vars=[:x]) - - -plot(sol,vars=(1,2,3),denseplot=false) - - -plot(sol,vars=(1,2,3),plotdensity=100) - - -plot(sol,vars=(1,2,3),plotdensity=10000) - - -plot(sol,vars=(1,2,3)) -scatter!(sol,vars=(1,2,3),plotdensity=100) - - -p = plot(sol,vars=(1,2,3)) -scatter!(p,sol,vars=(1,2,3),plotdensity=100) -title!("I added a title") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/ode_introduction.jl b/script/introduction/ode_introduction.jl deleted file mode 100644 index 68022532..00000000 --- a/script/introduction/ode_introduction.jl +++ /dev/null @@ -1,180 +0,0 @@ - -f(u,p,t) = 0.98u - - -using DifferentialEquations -f(u,p,t) = 0.98u -u0 = 1.0 -tspan = (0.0,1.0) -prob = ODEProblem(f,u0,tspan) - - -sol = solve(prob) - - -using Plots; gr() -plot(sol) - - -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", - xaxis="Time (t)",yaxis="u(t) (in μm)",label="My Thick Line!") # legend=false - - -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") - - -sol.t - - -sol.u - - -[t+u for (u,t) in tuples(sol)] - - -sol - - -sol(0.45) - - -sol = solve(prob,abstol=1e-8,reltol=1e-8) - - -plot(sol) -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") - - -sol = solve(prob,saveat=0.1) - - -sol = solve(prob,saveat=[0.2,0.7,0.9]) - - -sol = solve(prob,dense=false) - - -sol = solve(prob,save_everystep=false) - - -sol = solve(prob,save_everystep=false,save_start = false) - - -sol = solve(prob,alg_hints=[:stiff]) - - -sol = solve(prob,Tsit5(),reltol=1e-6) - - -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = 
u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end - - -u0 = [1.0,0.0,0.0] - - -p = (10,28,8/3) # we could also make this an array, or any other type! - - -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) - - -sol = solve(prob) - - -sol.t[10],sol[10] - - -sol[2,10] - - -A = Array(sol) - - -plot(sol) - - -plot(sol,vars=(1,2,3)) - - -plot(sol,vars=(1,2,3),denseplot=false) - - -plot(sol,vars=(0,2)) - - -function lotka_volterra!(du,u,p,t) - du[1] = p[1]*u[1] - p[2]*u[1]*u[2] - du[2] = -p[3]*u[2] + p[4]*u[1]*u[2] -end - - -using ParameterizedFunctions -lv! = @ode_def LotkaVolterra begin - dx = a*x - b*x*y - dy = -c*y + d*x*y -end a b c d - - -u0 = [1.0,1.0] -p = (1.5,1.0,3.0,1.0) -tspan = (0.0,10.0) -prob = ODEProblem(lv!,u0,tspan,p) -sol = solve(prob) -plot(sol) - - -lv!.Jex - - -A = [1. 0 0 -5 - 4 -2 4 -3 - -4 0 0 1 - 5 -2 2 3] -u0 = rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) - - -sol[3] - - -big_u0 = big.(u0) - - -prob = ODEProblem(f,big_u0,tspan) -sol = solve(prob) - - -sol[1,3] - - -prob = ODEProblem(f,big_u0,big.(tspan)) -sol = solve(prob) - - -using StaticArrays -A = @SMatrix [ 1.0 0.0 0.0 -5.0 - 4.0 -2.0 4.0 -3.0 - -4.0 0.0 0.0 1.0 - 5.0 -2.0 2.0 3.0] -u0 = @SMatrix rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) - - -sol[3] - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/optimizing_diffeq_code.jl b/script/introduction/optimizing_diffeq_code.jl deleted file mode 100644 index f9e55ece..00000000 --- a/script/introduction/optimizing_diffeq_code.jl +++ /dev/null @@ -1,325 +0,0 @@ - -function lorenz(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - [dx,dy,dz] -end - - -using DifferentialEquations, BenchmarkTools -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz,u0,tspan) -@benchmark solve(prob,Tsit5()) - - 
-@benchmark solve(prob,Tsit5(),save_everystep=false) - - -function lorenz!(du,u,p,t) - du[1] = 10.0*(u[2]-u[1]) - du[2] = u[1]*(28.0-u[3]) - u[2] - du[3] = u[1]*u[2] - (8/3)*u[3] -end - - -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5()) - - -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -tspan = (0.0,500.0) # 5x longer than before -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -using StaticArrays -A = @SVector [2.0,3.0,5.0] - - -function lorenz_static(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - @SVector [dx,dy,dz] -end - - -u0 = @SVector [1.0,0.0,0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz_static,u0,tspan) -@benchmark solve(prob,Tsit5()) - - -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000) -test(A,B,C) = A + B + C -@benchmark test(A,B,C) - - -test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C) -@benchmark test2(A,B,C) - - -function test3(A,B,C) - D = similar(A) - @inbounds for i in eachindex(A) - D[i] = A[i] + B[i] + C[i] - end - D -end -@benchmark test3(A,B,C) - - -test4(A,B,C) = A .+ B .+ C -@benchmark test4(A,B,C) - - -sin.(A) .+ sin.(B) - - -test5(A,B,C) = @. A + B + C #only one array allocated -@benchmark test5(A,B,C) - - -D = zeros(1000,1000); - - -test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated -@benchmark test6!(D,A,B,C) - - -test7!(D,A,B,C) = @. 
D = A + B + C #only one array allocated -@benchmark test7!(D,A,B,C) - - -test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C) -@benchmark test8!(D,A,B,C) - - -@benchmark A*B - - -using LinearAlgebra -@benchmark mul!(D,A,B) # same as D = A * B - - -# Generate the constants -p = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2 -N = 100 -Ax = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1])) -Ay = copy(Ax) -Ax[2,1] = 2.0 -Ax[end-1,end] = 2.0 -Ay[1,2] = 2.0 -Ay[end,end-1] = 2.0 - -function basic_version!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = r[:,:,1] - v = r[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u - dr[:,:,2] = Dv .+ a.*u.*u .- β*v -end - -a,α,ubar,β,D1,D2 = p -uss = (ubar+β)/α -vss = (a/β)*uss^2 -r0 = zeros(100,100,2) -r0[:,:,1] .= uss.+0.1.*rand.() -r0[:,:,2] .= vss - -prob = ODEProblem(basic_version!,r0,(0.0,0.1),p) - - -@benchmark solve(prob,Tsit5()) - - -A = rand(4) -@show A -B = @view A[1:3] -B[2] = 2 -@show A - - -function gm2!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - @. du = Du + a.*u.*u./v + ubar - α*u - @. dv = Dv + a.*u.*u - β*v -end -prob = ODEProblem(gm2!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -Ayu = zeros(N,N) -uAx = zeros(N,N) -Du = zeros(N,N) -Ayv = zeros(N,N) -vAx = zeros(N,N) -Dv = zeros(N,N) -function gm3!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. 
dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm3!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2 -function gm4!(dr,r,p,t) - a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm4!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -p = (1.0,1.0,1.0,10.0,0.001,100.0,N) -function fast_gm!(du,u,p,t) - a,α,ubar,β,D1,D2,N = p - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for j in 2:N-1 - i = 1 - du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = 1 - du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for i in 2:N-1 - j = 1 - du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = 1 - du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] 
+ 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds begin - i = 1; j = 1 - du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = 1; j = N - du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = 1 - du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = N - du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end -end -prob = ODEProblem(fast_gm!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p) -@benchmark solve(prob,Tsit5()) - - -using Sundials -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) - - -prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p) -# Will go out of memory if we don't turn off `save_everystep`! 
-@benchmark solve(prob,Tsit5(),save_everystep=false) - - -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) - - -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) - - -prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p) -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/classical_physics.jl b/script/models/classical_physics.jl deleted file mode 100644 index f99a604b..00000000 --- a/script/models/classical_physics.jl +++ /dev/null @@ -1,234 +0,0 @@ - -using OrdinaryDiffEq, Plots -gr() - -#Half-life of Carbon-14 is 5,730 years. -C₁ = 5.730 - -#Setup -u₀ = 1.0 -tspan = (0.0, 1.0) - -#Define the problem -radioactivedecay(u,p,t) = -C₁*u - -#Pass to solver -prob = ODEProblem(radioactivedecay,u₀,tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Carbon-14 half-life", xaxis = "Time in thousands of years", yaxis = "Percentage left", label = "Numerical Solution") -plot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label="Analytical Solution") - - -# Simple Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants -const g = 9.81 -L = 1.0 - -#Initial Conditions -u₀ = [0,π/2] -tspan = (0.0,6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum,u₀, tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Simple Pendulum Problem", xaxis = "Time", yaxis = "Height", label = ["Theta","dTheta"]) - - -p = plot(sol,vars = (1,2), xlims = (-9,9), title = "Phase Space Plot", xaxis = "Velocity", yaxis = "Position", leg=false) -function phase_plot(prob, u0, p, tspan=2pi) - _prob = ODEProblem(prob.f,u0,(0.0,tspan)) - sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy - plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing) -end -for i in 
-4pi:pi/2:4π - for j in -4pi:pi/2:4π - phase_plot(prob, [j,i], p) - end -end -plot(p,xlims = (-9,9)) - - -#Double Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants and setup -const m₁, m₂, L₁, L₂ = 1, 2, 1, 2 -initial = [0, π/3, 0, 3pi/5] -tspan = (0.,50.) - -#Convenience function for transforming from polar to Cartesian coordinates -function polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4)) - u = sol.t[1]:dt:sol.t[end] - - p1 = l1*map(x->x[vars[1]], sol.(u)) - p2 = l2*map(y->y[vars[2]], sol.(u)) - - x1 = l1*sin.(p1) - y1 = l1*-cos.(p1) - (u, (x1 + l2*sin.(p2), - y1 - l2*cos.(p2))) -end - -#Define the Problem -function double_pendulum(xdot,x,p,t) - xdot[1]=x[2] - xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2))) - xdot[3]=x[4] - xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2)) -end - -#Pass to Solvers -double_pendulum_problem = ODEProblem(double_pendulum, initial, tspan) -sol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05); - - -#Obtain coordinates in Cartesian Geometry -ts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01) -plot(ps...) - - -#Constants and setup -using OrdinaryDiffEq -initial2 = [0.01, 0.005, 0.01, 0.01] -tspan2 = (0.,200.) 
- -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -# Construct a ContiunousCallback -condition(u,t,integrator) = u[1] -affect!(integrator) = nothing -cb = ContinuousCallback(condition,affect!,nothing, - save_positions = (true,false)) - -# Construct Problem -poincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2) -sol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - -function poincare_map(prob, u₀, p; callback=cb) - _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan) - sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - scatter!(p, sol, vars=(3,4), markersize = 2) -end - - -p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03)) -for i in -0.01:0.00125:0.01 - poincare_map(poincare, i, p) -end -plot(p,ylims=(-0.01,0.03)) - - -using OrdinaryDiffEq, Plots - -#Setup -initial = [0.,0.1,0.5,0] -tspan = (0,100.) - -#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will -#the total energy of the system. -V(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3) -E(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2); - -#Define the function -function Hénon_Heiles(du,u,p,t) - x = u[1] - y = u[2] - dx = u[3] - dy = u[4] - du[1] = dx - du[2] = dy - du[3] = -x - 2x*y - du[4] = y^2 - y -x^2 -end - -#Pass to solvers -prob = ODEProblem(Hénon_Heiles, initial, tspan) -sol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16); - - -# Plot the orbit -plot(sol, vars=(1,2), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) - - -#Optional Sanity check - what do you think this returns and why? 
-@show sol.retcode - -#Plot - -plot(sol, vars=(1,3), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol, vars=(2,4), leg = false) - - -#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector -#pass it to the plotter a bit more conveniently -energy = map(x->E(x...), sol.u) - -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. -@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") - - -function HH_acceleration!(dv,v,u,p,t) - x,y = u - dx,dy = dv - dv[1] = -x - 2x*y - dv[2] = y^2 - y -x^2 -end -initial_positions = [0.0,0.1] -initial_velocities = [0.5,0.0] -prob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan) -sol2 = solve(prob, KahanLi8(), dt=1/10); - - -# Plot the orbit -plot(sol2, vars=(3,4), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) - - -plot(sol2, vars=(3,1), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol2, vars=(4,2), leg = false) - - -energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u) -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. 
-@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol2.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") - - -sol3 = solve(prob, DPRKN6()); -energy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u) -@show ΔE = energy[1]-energy[end] -gr() -plot(sol3.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/conditional_dosing.jl b/script/models/conditional_dosing.jl deleted file mode 100644 index a43befed..00000000 --- a/script/models/conditional_dosing.jl +++ /dev/null @@ -1,50 +0,0 @@ - -using DifferentialEquations -function f(du,u,p,t) - du[1] = -u[1] -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) - - -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) - - -condition(u,t,integrator) = t==4 && u[1]/V<4 -affect!(integrator) = integrator.u[1] += 10 -cb = DiscreteCallback(condition,affect!) 
- - -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) - - -println(sol(4.00000)) -println(sol(4.000000000001)) - - -function f(du,u,p,t) - du[1] = -u[1]/6 -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) - - -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) - - -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/diffeqbio_II_networkproperties.jl b/script/models/diffeqbio_II_networkproperties.jl deleted file mode 100644 index 5c9a0d2f..00000000 --- a/script/models/diffeqbio_II_networkproperties.jl +++ /dev/null @@ -1,190 +0,0 @@ - -using DifferentialEquations, DiffEqBiological, Latexify, Plots -fmt = :svg -pyplot(fmt=fmt) -rn = @reaction_network begin - hillr(D₂,α,K,n), ∅ --> m₁ - hillr(D₁,α,K,n), ∅ --> m₂ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - (k₊,k₋), 2P₁ ↔ D₁ - (k₊,k₋), 2P₂ ↔ D₂ - (k₊,k₋), P₁+P₂ ↔ T -end α K n δ γ β μ k₊ k₋; - - -latexify(rn; env=:chemical) - - -x = latexify(rn; env=:chemical, starred=true, mathjax=true); -display("text/latex", "$x"); - - -species(rn) - - -params(rn) - - -substratesymstoich(rn, 11) - - -substratesymstoich.(rn, 1:numreactions(rn)) - - -netstoich.(rn, 1:numreactions(rn)) - - -rxtospecies_depgraph(rn) - - -species(rn)[[3,4,7]] - - -speciestorx_depgraph(rn)[1] - - -findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn))) - - -rxtorx_depgraph(rn) - - -rnmin = @min_reaction_network begin - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ -end δ γ β μ; - - -addspecies!(rnmin, :D₁) -addspecies!(rnmin, :D₂) -addspecies!(rnmin, :T) - - -addparam!(rnmin, :α) -addparam!(rnmin, :K) -addparam!(rnmin, :n) -addparam!(rnmin, :k₊) -addparam!(rnmin, :k₋) - - -addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂)) 
-addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂)) -addreaction!(rnmin, :k₊, :(2P₁ --> D₁)) -addreaction!(rnmin, :k₋, :(D₁ --> 2P₁)) - - -# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich) -addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,)) -addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,)) -addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1)) - - -setdiff(species(rn), species(rnmin)) - - -setdiff(params(rn), params(rnmin)) - - -rxidx = numreactions(rn) -setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx)) - - -setdiff(products(rn, rxidx), products(rnmin, rxidx)) - - -rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx) - - -addodes!(rnmin) - - -odeexprs(rnmin) - - -latexify(rnmin) - - -x = latexify(rnmin, starred=true); -display("text/latex", "$x"); - - -latexify(jacobianexprs(rnmin)) - - -x = latexify(jacobianexprs(rnmin), starred=true); -display("text/latex", "$x"); - - -N = 64 -h = 1 / N - - -rn = @empty_reaction_network - -for i = 1:N - addspecies!(rn, Symbol(:u, i)) -end - - -addparam!(rn, :β) - - -for i = 1:N - (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,)) - (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,)) -end - - -addodes!(rn) - - -u₀ = zeros(N) -u₀[div(N,2)] = 10000 -p = [1/(h*h)] -tspan = (0.,.01) -oprob = ODEProblem(rn, u₀, tspan, p) - - -sol = solve(oprob, KenCarp4()) -times = [0., .0001, .001, .01] -plt = plot() -for time in times - plot!(plt, 1:N, sol(time), fmt=fmt, xlabel="i", ylabel="uᵢ", label=string("t = ", time), lw=3) -end -plot(plt, ylims=(0.,10000.)) - - -addjumps!(rn, build_regular_jumps=false, minimal_jumps=true) - -# make the initial condition integer valued -u₀ = zeros(Int, N) -u₀[div(N,2)] = 10000 - -# setup and solve the problem -dprob = DiscreteProblem(rn, u₀, tspan, p) -jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false)) -jsol = solve(jprob, SSAStepper(), saveat=times) - - -times = [0., .0001, .001, .01] -plts = [] -for i = 1:4 - b = 
bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i])) - plot!(b,sol(times[i])) - push!(plts,b) -end -plot(plts...) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) - diff --git a/script/models/diffeqbio_I_introduction.jl b/script/models/diffeqbio_I_introduction.jl deleted file mode 100644 index 15c99ef6..00000000 --- a/script/models/diffeqbio_I_introduction.jl +++ /dev/null @@ -1,110 +0,0 @@ - -# If not already installed, first hit "]" within a Julia REPL. Then type: -# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify - -using DifferentialEquations, DiffEqBiological, Plots, Latexify -pyplot(fmt=:svg); - - -repressilator = @reaction_network begin - hillr(P₃,α,K,n), ∅ --> m₁ - hillr(P₁,α,K,n), ∅ --> m₂ - hillr(P₂,α,K,n), ∅ --> m₃ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - (δ,γ), m₃ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - β, m₃ --> m₃ + P₃ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - μ, P₃ --> ∅ -end α K n δ γ β μ; - - -latexify(repressilator; env=:chemical) - - -x = latexify(repressilator; env=:chemical, starred=true, mathjax=true); -display("text/latex", "$x"); - - -latexify(repressilator) - - -x = latexify(repressilator, starred=true); -display("text/latex", "$x"); - - -speciesmap(repressilator) - - -paramsmap(repressilator) - - -# parameters [α,K,n,δ,γ,β,μ] -p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60) - -# initial condition [m₁,m₂,m₃,P₁,P₂,P₃] -u₀ = [0.,0.,0.,20.,0.,0.] - -# time interval to solve on -tspan = (0., 10000.) - -# create the ODEProblem we want to solve -oprob = ODEProblem(repressilator, u₀, tspan, p) - - -sol = solve(oprob, saveat=10.) 
-plot(sol, fmt=:svg) - - -# first we redefine the initial condition to be integer valued -u₀ = [0,0,0,20,0,0] - -# next we create a discrete problem to encode that our species are integer valued: -dprob = DiscreteProblem(repressilator, u₀, tspan, p) - -# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver: -jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false)) - -# now let's solve and plot the jump process: -sol = solve(jprob, SSAStepper(), saveat=10.) -plot(sol, fmt=:svg) - - -rjs = regularjumps(repressilator) -lprob = JumpProblem(dprob, Direct(), rjs) -lsol = solve(lprob, SimpleTauLeaping(), dt=.1) -plot(lsol, plotdensity=1000, fmt=:svg) - - -bdp = @reaction_network begin - c₁, X --> 2X - c₂, X --> 0 - c₃, 0 --> X -end c₁ c₂ c₃ -p = (1.0,2.0,50.) -u₀ = [5.] -tspan = (0.,4.); - - -# SDEProblem for CLE -sprob = SDEProblem(bdp, u₀, tspan, p) - -# solve and plot, tstops is used to specify enough points -# that the plot looks well-resolved -sol = solve(sprob, tstops=range(0., step=4e-3, length=1001)) -plot(sol, fmt=:svg) - - -latexify(jacobianexprs(repressilator)) - - -x = latexify(jacobianexprs(repressilator), starred=true); -display("text/latex", "$x"); - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) - diff --git a/script/models/kepler_problem.jl b/script/models/kepler_problem.jl deleted file mode 100644 index be9f97b1..00000000 --- a/script/models/kepler_problem.jl +++ /dev/null @@ -1,99 +0,0 @@ - -using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr() -H(q,p) = norm(p)^2/2 - inv(norm(q)) -L(q,p) = q[1]*p[2] - p[1]*q[2] - -pdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q) -qdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p) - -initial_position = [.4, 0] -initial_velocity = [0., 2.] 
-initial_cond = (initial_position, initial_velocity) -initial_first_integrals = (H(initial_cond...), L(initial_cond...)) -tspan = (0,20.) -prob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan) -sol = solve(prob, KahanLi6(), dt=1//10); - - -plot_orbit(sol) = plot(sol,vars=(3,4), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab="Angular momentum variation") -end -analysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L)) - - -analysis_plot(sol, H, L) - - -sol2 = solve(prob, DPRKN6()) # dt is not necessary, because unlike symplectic - # integrators DPRKN6 is adaptive -@show sol2.u |> length -analysis_plot(sol2, H, L) - - -sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic - # integrators ERKN4 is adaptive -@show sol3.u |> length -analysis_plot(sol3, H, L) - - -sol4 = solve(prob, Tsit5()) -@show sol4.u |> length -analysis_plot(sol4, H, L) - - -using DiffEqCallbacks - -plot_orbit2(sol) = plot(sol,vars=(1,2), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals2(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab="Angular momentum variation") -end - -analysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L)) - -function hamiltonian(du,u,params,t) - q, p = u[1:2], u[3:4] - qdot(@view(du[1:2]), p, q, params, t) - pdot(@view(du[3:4]), p, q, params, t) -end - -prob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan) -sol_ = solve(prob2, RK4(), dt=1//5, adaptive=false) -analysis_plot2(sol_, H, L) - - -function first_integrals_manifold(residual,u) - 
residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) -end - -cb = ManifoldProjection(first_integrals_manifold) -sol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb) -analysis_plot2(sol5, H, L) - - -function energy_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -energy_cb = ManifoldProjection(energy_manifold) -sol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb) -analysis_plot2(sol6, H, L) - - -function angular_manifold(residual,u) - residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -angular_cb = ManifoldProjection(angular_manifold) -sol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb) -analysis_plot2(sol7, H, L) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/outer_solar_system.jl b/script/models/outer_solar_system.jl deleted file mode 100644 index b3ee7e84..00000000 --- a/script/models/outer_solar_system.jl +++ /dev/null @@ -1,36 +0,0 @@ - -using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools -gr() - -G = 2.95912208286e-4 -M = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8] -planets = ["Sun", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"] - -pos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357] -pos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594] -pos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382] -pos = ArrayPartition(pos_x,pos_y,pos_z) - -vel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725] -vel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702] -vel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504] -vel = ArrayPartition(vel_x,vel_y,vel_z) - -tspan = (0.,200_000) - - -const ∑ = sum 
-const N = 6 -potential(p, t, x, y, z, M) = -G*∑(i->∑(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N) - - -nprob = NBodyProblem(potential, M, pos, vel, tspan) -sol = solve(nprob,Yoshida6(), dt=100); - - -orbitplot(sol,body_names=planets) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/ode_extras/feagin.jl b/script/ode_extras/feagin.jl deleted file mode 100644 index f029f8dc..00000000 --- a/script/ode_extras/feagin.jl +++ /dev/null @@ -1,35 +0,0 @@ - -using DifferentialEquations -const linear_bigα = big(1.01) -f(u,p,t) = (linear_bigα*u) - -# Add analytical solution so that errors are checked -f_analytic(u0,p,t) = u0*exp(linear_bigα*t) -ff = ODEFunction(f,analytic=f_analytic) -prob = ODEProblem(ff,big(0.5),(0.0,1.0)) -sol = solve(prob,Feagin14(),dt=1//16,adaptive=false); - - -println(sol.errors) - - -eps(Float64) - - -sol =solve(prob,Feagin14()); -println(sol.errors); print("The length was $(length(sol))") - - -using DiffEqDevTools -dts = 1.0 ./ 2.0 .^(10:-1:4) -sim = test_convergence(dts,prob,Feagin14()) - - -using Plots -gr() -plot(sim) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/ode_extras/monte_carlo_parameter_estim.jl b/script/ode_extras/monte_carlo_parameter_estim.jl deleted file mode 100644 index 4148c5ca..00000000 --- a/script/ode_extras/monte_carlo_parameter_estim.jl +++ /dev/null @@ -1,74 +0,0 @@ - -using DifferentialEquations, DiffEqParamEstim, Plots, Optim - -# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions - -# Set up Lotka-Volterra system -function pf_func(du,u,p,t) - du[1] = p[1] * u[1] - p[2] * u[1]*u[2] - du[2] = -3 * u[2] + u[1]*u[2] -end -p = [1.5,1.0] -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p) - - -# Setting up to solve the problem N times (for the N different initial conditions) -N = 10; -initial_conditions = [[1.0,1.0], 
[1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]] -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) - - -# Check above does what we want -sim = solve(monte_prob,Tsit5(),num_monte=N) -plot(sim) - - -# Generate a dataset from these runs -data_times = 0.0:0.1:10.0 -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -data = Array(sim) - - -# Building a loss function -losses = [L2Loss(data_times,data[:,:,i]) for i in 1:N] - - -loss(sim) = sum(losses[i](sim[i]) for i in 1:N) - - -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8]) -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -loss(sim) - - -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - saveat=data_times) - - -lower = zeros(2) -upper = fill(2.0,2) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) - - -result - - -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - abstol=1e-8,reltol=1e-8, - saveat=data_times) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) - - -result - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/ode_extras/ode_minmax.jl b/script/ode_extras/ode_minmax.jl deleted file mode 100644 index 212157a7..00000000 --- a/script/ode_extras/ode_minmax.jl +++ /dev/null @@ -1,88 +0,0 @@ - -#Constants and setup -using OrdinaryDiffEq -initial = [0.01, 0.01, 0.01, 0.01] -tspan = (0.,100.) 
- -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -#Pass to solvers -poincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan) - - -sol = solve(poincare, Tsit5()) - - -using Plots; gr() -plot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000) - - -plot(sol, vars=(3,4), leg=false) - - -f = (t) -> sol(t,idxs=4) - - -using Optim -opt = optimize(f,18.0,22.0) - - -println(opt.minimizer) -println(opt.minimum) - - -f = (t) -> -sol(first(t),idxs=4) -opt2 = optimize(f,0.0,22.0) - - -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([opt.minimizer],[opt.minimum],label="Local Min") -scatter!([opt2.minimizer],[-opt2.minimum],label="Local Max") - - -f = (t) -> -sol(first(t),idxs=4) -opt = optimize(f,[20.0],BFGS()) - - -import NLopt, ForwardDiff - -count = 0 # keep track of # function evaluations - -function g(t::Vector, grad::Vector) - if length(grad) > 0 - #use ForwardDiff for the gradients - grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t) - end - sol(first(t),idxs=4) -end -opt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1) -NLopt.lower_bounds!(opt, [0.0]) -NLopt.upper_bounds!(opt, [40.0]) -NLopt.xtol_rel!(opt,1e-8) -NLopt.min_objective!(opt, g) -(minf,minx,ret) = NLopt.optimize(opt,[20.0]) -println(minf," ",minx," ",ret) -NLopt.max_objective!(opt, g) -(maxf,maxx,ret) = NLopt.optimize(opt,[20.0]) -println(maxf," ",maxx," ",ret) - - -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([minx],[minf],label="Global Min") -scatter!([maxx],[maxf],label="Global Max") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/test.jl b/script/test.jl deleted file mode 100644 index ef9d4119..00000000 --- 
a/script/test.jl +++ /dev/null @@ -1,4 +0,0 @@ - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/type_handling/number_types.jl b/script/type_handling/number_types.jl deleted file mode 100644 index 9282ec72..00000000 --- a/script/type_handling/number_types.jl +++ /dev/null @@ -1,53 +0,0 @@ - -using DifferentialEquations -f = (u,p,t) -> (p*u) -prob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01); - - -prob = prob_ode_linear -sol =solve(prob,Tsit5()) -println(sol) - - -prob = ODEProblem(f,1/2,(0//1,1//1),101//100); -sol = solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol) - - -prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100); -sol =solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol[end]) - - -prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01)) -sol =solve(prob_ode_biglinear,Tsit5()) -println(sol[end]) - - -using DoubleFloats -prob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01)) -sol =solve(prob_ode_doublelinear,Tsit5()) -println(sol[end]) - - -using ArbNumerics -prob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01)) -sol =solve(prob_ode_arbfloatlinear,Tsit5()) -println(sol[end]) - - -using DecFP -prob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01)) -sol =solve(prob_ode_decfplinear,Tsit5()) -println(sol[end]); println(typeof(sol[end])) - - -using Decimals -prob_ode_decimallinear = ODEProblem(f,[decimal("1.0")]./[decimal("2.0")],(0//1,1//1),decimal(1.01)) -sol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails -println(sol[end]); println(typeof(sol[end])) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/type_handling/uncertainties.jl b/script/type_handling/uncertainties.jl deleted file mode 100644 index 76dcbcb6..00000000 --- 
a/script/type_handling/uncertainties.jl +++ /dev/null @@ -1,107 +0,0 @@ - -using Measurements - -5.23 ± 0.14 === 5.23 ± 0.14 - - -(5.23± 0.14) - (5.23 ± 0.14) - - -(5.23 ± 0.14) / (5.23 ± 0.14) - - -x = 5.23 ± 0.14 -x === x - - -x - x - - -x / x - - -using DifferentialEquations, Measurements, Plots - -# Half-life and mean lifetime of radiocarbon, in years -t_12 = 5730 ± 40 -τ = t_12 / log(2) - -#Setup -u₀ = 1 ± 0 -tspan = (0.0, 10000.0) - -#Define the problem -radioactivedecay(u,p,t) = - u / τ - -#Pass to solver -prob = ODEProblem(radioactivedecay, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-8) - -# Analytic solution -u = exp.(- sol.t / τ) - -plot(sol.t, sol.u, label = "Numerical", xlabel = "Years", ylabel = "Fraction of Carbon-14") -plot!(sol.t, u, label = "Analytic") - - -println("Quantity of carbon-14 after ", sol.t[11], " years:") -println("Numerical: ", sol[11]) -println("Analytic: ", u[11]) - - -using DifferentialEquations, Measurements, Plots - -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*θ -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -# Analytic solution -u = u₀[2] .* cos.(sqrt(g / L) .* sol.t) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -plot!(sol.t, u, label = "Analytic") - - -plot(sol.t, getindex.(sol.u, 2) .- u, label = "") - - -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L) * sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = 
solve(prob, Tsit5(), reltol = 1e-6) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/type_handling/unitful.jl b/script/type_handling/unitful.jl deleted file mode 100644 index 6a853650..00000000 --- a/script/type_handling/unitful.jl +++ /dev/null @@ -1,41 +0,0 @@ - -using Unitful -t = 1.0u"s" - - -t2 = 1.02u"s" -t+t2 - - -t*t2 - - -sqrt(t) - - -t + sqrt(t) - - -using DifferentialEquations -f = (y,p,t) -> 0.5*y -u0 = 1.5u"N" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) - - -f = (y,p,t) -> 0.5*y/3.0u"s" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) - - -print(sol[:]) - - -using Plots -gr() -plot(ustrip(sol.t),ustrip(sol[:]),lw=3) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/src/DiffEqTutorials.jl b/src/DiffEqTutorials.jl deleted file mode 100644 index ab8261e5..00000000 --- a/src/DiffEqTutorials.jl +++ /dev/null @@ -1,113 +0,0 @@ -module DiffEqTutorials - -using Weave, Pkg, InteractiveUtils, IJulia - -repo_directory = joinpath(@__DIR__,"..") -cssfile = joinpath(@__DIR__, "..", "templates", "skeleton_css.css") -latexfile = joinpath(@__DIR__, "..", "templates", "julia_tex.tpl") - -function weave_file(folder,file,build_list=(:script,:html,:pdf,:notebook); kwargs...) - tmp = joinpath(repo_directory,"tutorials",folder,file) - args = Dict{Symbol,String}(:folder=>folder,:file=>file) - if :script ∈ build_list - println("Building Script") - dir = joinpath(repo_directory,"script",folder) - isdir(dir) || mkdir(dir) - tangle(tmp;out_path=dir) - end - if :html ∈ build_list - println("Building HTML") - dir = joinpath(repo_directory,"html",folder) - isdir(dir) || mkdir(dir) - weave(tmp,doctype = "md2html",out_path=dir,args=args; css=cssfile, kwargs...) 
- end - if :pdf ∈ build_list - println("Building PDF") - dir = joinpath(repo_directory,"pdf",folder) - isdir(dir) || mkdir(dir) - weave(tmp,doctype="md2pdf",out_path=dir,args=args; template=latexfile, kwargs...) - end - if :github ∈ build_list - println("Building Github Markdown") - dir = joinpath(repo_directory,"markdown",folder) - isdir(dir) || mkdir(dir) - weave(tmp,doctype = "github",out_path=dir,args=args; kwargs...) - end - if :notebook ∈ build_list - println("Building Notebook") - dir = joinpath(repo_directory,"notebook",folder) - isdir(dir) || mkdir(dir) - Weave.convert_doc(tmp,joinpath(dir,file[1:end-4]*".ipynb")) - end -end - -function weave_all() - for folder in readdir(joinpath(repo_directory,"tutorials")) - folder == "test.jmd" && continue - weave_folder(folder) - end -end - -function weave_folder(folder) - for file in readdir(joinpath(repo_directory,"tutorials",folder)) - println("Building $(joinpath(folder,file)))") - try - weave_file(folder,file) - catch - end - end -end - -function tutorial_footer(folder=nothing, file=nothing; remove_homedir=true) - display("text/markdown", """ - ## Appendix - - This tutorial is part of the DiffEqTutorials.jl repository, found at: - """) - if folder !== nothing && file !== nothing - display("text/markdown", """ - To locally run this tutorial, do the following commands: - ``` - using DiffEqTutorials - DiffEqTutorials.weave_file("$folder","$file") - ``` - """) - end - display("text/markdown", "Computer Information:") - vinfo = sprint(InteractiveUtils.versioninfo) - display("text/markdown", """ - ``` - $(vinfo) - ``` - """) - - ctx = Pkg.API.Context() - pkgs = Pkg.Display.status(Pkg.API.Context(), use_as_api=true); - projfile = ctx.env.project_file - remove_homedir && (projfile = replace(projfile, homedir() => "~")) - - display("text/markdown",""" - Package Information: - """) - - md = "" - md *= "```\nStatus `$(projfile)`\n" - - for pkg in pkgs - if pkg.old.ver != nothing - md *= "[$(string(pkg.uuid))] 
$(string(pkg.name)) $(string(pkg.old.ver))\n" - else - md *= "[$(string(pkg.uuid))] $(string(pkg.name))\n" - end - end - md *= "```" - display("text/markdown", md) -end - -function open_notebooks() - Base.eval(Main, Meta.parse("import IJulia")) - path = joinpath(repo_directory,"notebook") - IJulia.notebook(;dir=path) -end - -end diff --git a/src/SciMLTutorials.jl b/src/SciMLTutorials.jl new file mode 100644 index 00000000..7841c3bf --- /dev/null +++ b/src/SciMLTutorials.jl @@ -0,0 +1,140 @@ +module SciMLTutorials + +using Weave, Pkg, IJulia, InteractiveUtils, Markdown + +repo_directory = joinpath(@__DIR__, "..") +cssfile = joinpath(@__DIR__, "..", "templates", "skeleton_css.css") +latexfile = joinpath(@__DIR__, "..", "templates", "julia_tex.tpl") +default_builds = (:script, :github) + +function weave_file(folder, file, build_list = default_builds) + target = joinpath(folder, file) + @info("Weaving $(target)") + + if isfile(joinpath(folder, "Project.toml")) && build_list != (:notebook,) + @info("Instantiating", folder) + Pkg.activate(joinpath(folder)) + Pkg.instantiate() + Pkg.build() + + @info("Printing out `Pkg.status()`") + Pkg.status() + end + + args = Dict{Symbol, String}(:folder=>folder, :file=>file) + if :script ∈ build_list + println("Building Script") + dir = joinpath(repo_directory, "script", basename(folder)) + mkpath(dir) + tangle(target; out_path = dir) + end + if :html ∈ build_list + println("Building HTML") + dir = joinpath(repo_directory, "html", basename(folder)) + mkpath(dir) + weave(target, doctype = "md2html", out_path = dir, + args = args, css = cssfile, fig_ext = ".svg") + end + if :pdf ∈ build_list + println("Building PDF") + dir = joinpath(repo_directory, "pdf", basename(folder)) + mkpath(dir) + try + weave(target, doctype = "md2pdf", out_path = dir, + template = latexfile, args = args) + catch ex + @warn "PDF generation failed" exception=(ex, catch_backtrace()) + end + end + if :github ∈ build_list + println("Building Github Markdown") + dir 
= joinpath(repo_directory, "markdown", basename(folder)) + mkpath(dir) + weave(target, doctype = "github", out_path = dir, args = args) + end + if :notebook ∈ build_list + println("Building Notebook") + dir = joinpath(repo_directory, "notebook", basename(folder)) + mkpath(dir) + Weave.convert_doc(target, joinpath(dir, file[1:(end - 4)]*".ipynb")) + end +end + +function weave_all(build_list = default_builds) + for folder in readdir(joinpath(repo_directory, "tutorials")) + folder == "test.jmd" && continue + weave_folder(joinpath(repo_directory, "tutorials", folder), build_list) + end +end + +function weave_folder(folder, build_list = default_builds) + for file in readdir(joinpath(folder)) + # Skip non-`.jmd` files + if !endswith(file, ".jmd") + continue + end + + try + weave_file(folder, file, build_list) + catch e + @error(e) + end + end +end + +function tutorial_footer(folder = nothing, file = nothing) + display(md""" + ## Appendix + + These tutorials are a part of the SciMLTutorials.jl repository, found at: . + For more information on high-performance scientific machine learning, check out the SciML Open Source Software Organization . 
+ + """) + if folder !== nothing && file !== nothing + display(Markdown.parse(""" + To locally run this tutorial, do the following commands: + ``` + using SciMLTutorials + SciMLTutorials.weave_file("$folder","$file") + ``` + """)) + end + display(md"Computer Information:") + vinfo = sprint(InteractiveUtils.versioninfo) + display(Markdown.parse(""" + ``` + $(vinfo) + ``` + """)) + + display(md""" + Package Information: + """) + + proj = sprint(io -> Pkg.status(io = io)) + mani = sprint(io -> Pkg.status(io = io, mode = Pkg.PKGMODE_MANIFEST)) + + md = """ + ``` + $(chomp(proj)) + ``` + + And the full manifest: + + ``` + $(chomp(mani)) + ``` + """ + display(Markdown.parse(md)) +end + +function open_notebooks() + Base.eval(Main, Meta.parse("import IJulia")) + weave_all((:notebook,)) + path = joinpath(repo_directory, "notebook") + newpath = joinpath(pwd(), "generated_notebooks") + mv(path, newpath) + IJulia.notebook(; dir = newpath) +end + +end diff --git a/test/runtests.jl b/test/runtests.jl index 63652463..8d7d6e65 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,2 +1,3 @@ -using DiffEqTutorials -DiffEqTutorials.weave_file(".","test.jmd") +using SciMLTutorials +tutorials_dir = joinpath(dirname(@__DIR__), "tutorials") +SciMLTutorials.weave_file(joinpath(tutorials_dir, "Testing"), "test.jmd") diff --git a/tutorials/Testing/Manifest.toml b/tutorials/Testing/Manifest.toml new file mode 100644 index 00000000..09c3a1aa --- /dev/null +++ b/tutorials/Testing/Manifest.toml @@ -0,0 +1,878 @@ +# This file is machine-generated - editing it directly is not advised + +[[Adapt]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "f1b523983a58802c4695851926203b36e28f09db" +uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +version = "3.3.0" + +[[ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" + +[[Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + +[[Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[Bzip2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", 
"Pkg"] +git-tree-sha1 = "c3598e525718abcc440f69cc6d5f60dda0a1b61e" +uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0" +version = "1.0.6+5" + +[[Cairo_jll]] +deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "e2f47f6d8337369411569fd45ae5753ca10394c6" +uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" +version = "1.16.0+6" + +[[ColorSchemes]] +deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random", "StaticArrays"] +git-tree-sha1 = "c8fd01e4b736013bc61b704871d20503b33ea402" +uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4" +version = "3.12.1" + +[[ColorTypes]] +deps = ["FixedPointNumbers", "Random"] +git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597" +uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" +version = "0.11.0" + +[[Colors]] +deps = ["ColorTypes", "FixedPointNumbers", "Reexport"] +git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40" +uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" +version = "0.12.8" + +[[Compat]] +deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] +git-tree-sha1 = "e4e2b39db08f967cc1360951f01e8a75ec441cab" +uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" +version = "3.30.0" + +[[CompilerSupportLibraries_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" + +[[Conda]] +deps = ["JSON", "VersionParsing"] +git-tree-sha1 = "299304989a5e6473d985212c28928899c74e9421" +uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" +version = "1.5.2" + +[[Contour]] +deps = ["StaticArrays"] +git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7" +uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" +version = "0.5.7" + +[[DataAPI]] 
+git-tree-sha1 = "dfb3b7e89e395be1e25c2ad6d7690dc29cc53b1d" +uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" +version = "1.6.0" + +[[DataStructures]] +deps = ["Compat", "InteractiveUtils", "OrderedCollections"] +git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677" +uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +version = "0.18.9" + +[[DataValueInterfaces]] +git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" +uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" +version = "1.0.0" + +[[Dates]] +deps = ["Printf"] +uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" + +[[DelimitedFiles]] +deps = ["Mmap"] +uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" + +[[Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + +[[DocStringExtensions]] +deps = ["LibGit2", "Markdown", "Pkg", "Test"] +git-tree-sha1 = "9d4f64f79012636741cf01133158a54b24924c32" +uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +version = "0.8.4" + +[[Downloads]] +deps = ["ArgTools", "LibCURL", "NetworkOptions"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" + +[[EarCut_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "92d8f9f208637e8d2d28c664051a00569c01493d" +uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5" +version = "2.1.5+1" + +[[Expat_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f" +uuid = "2e619515-83b5-522b-bb60-26c02a35a201" +version = "2.2.10+0" + +[[FFMPEG]] +deps = ["FFMPEG_jll", "x264_jll"] +git-tree-sha1 = "9a73ffdc375be61b0e4516d83d880b265366fe1f" +uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" +version = "0.4.0" + +[[FFMPEG_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "LibVPX_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] +git-tree-sha1 = "3cc57ad0a213808473eafef4845a74766242e05f" +uuid = 
"b22a6f82-2f65-5046-a5b2-351ab43fb4e5" +version = "4.3.1+4" + +[[FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" + +[[FixedPointNumbers]] +deps = ["Statistics"] +git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" +uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" +version = "0.8.4" + +[[Fontconfig_jll]] +deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "35895cf184ceaab11fd778b4590144034a167a2f" +uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" +version = "2.13.1+14" + +[[Formatting]] +deps = ["Printf"] +git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" +uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" +version = "0.4.2" + +[[FreeType2_jll]] +deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "cbd58c9deb1d304f5a245a0b7eb841a2560cfec6" +uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7" +version = "2.10.1+5" + +[[FriBidi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "0d20aed5b14dd4c9a2453c1b601d08e1149679cc" +uuid = "559328eb-81f9-559d-9380-de523a88c83c" +version = "1.0.5+6" + +[[GLFW_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] +git-tree-sha1 = "a199aefead29c3c2638c3571a9993b564109d45a" +uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" +version = "3.3.4+0" + +[[GR]] +deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test", "UUIDs"] +git-tree-sha1 = "011458b83178ac913dc4eb73b229af45bdde5d83" +uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" +version = "0.57.4" + +[[GR_jll]] +deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] 
+git-tree-sha1 = "90acee5c38f4933342fa9a3bbc483119d20e7033" +uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" +version = "0.57.2+0" + +[[GeometryBasics]] +deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"] +git-tree-sha1 = "4136b8a5668341e58398bb472754bff4ba0456ff" +uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326" +version = "0.3.12" + +[[Gettext_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"] +git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046" +uuid = "78b55507-aeef-58d4-861c-77aaff3498b1" +version = "0.21.0+0" + +[[Glib_jll]] +deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "47ce50b742921377301e15005c96e979574e130b" +uuid = "7746bdde-850d-59dc-9ae8-88ece973131d" +version = "2.68.1+0" + +[[Grisu]] +git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" +uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" +version = "1.0.2" + +[[HTTP]] +deps = ["Base64", "Dates", "IniFile", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] +git-tree-sha1 = "1fd26bc48f96adcdd8823f7fc300053faf3d7ba1" +uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" +version = "0.9.9" + +[[Highlights]] +deps = ["DocStringExtensions", "InteractiveUtils", "REPL"] +git-tree-sha1 = "f823a2d04fb233d52812c8024a6d46d9581904a4" +uuid = "eafb193a-b7ab-5a9e-9068-77385905fa72" +version = "0.4.5" + +[[IJulia]] +deps = ["Base64", "Conda", "Dates", "InteractiveUtils", "JSON", "Libdl", "Markdown", "MbedTLS", "Pkg", "Printf", "REPL", "Random", "SoftGlobalScope", "Test", "UUIDs", "ZMQ"] +git-tree-sha1 = "d8b9c31196e1dd92181cd0f5760ca2d2ffb4ac0f" +uuid = "7073ff75-c697-5162-941a-fcdaad2a7d2a" +version = "1.23.2" + +[[IniFile]] +deps = ["Test"] +git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8" +uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f" +version = "0.5.0" + +[[InteractiveUtils]] +deps = 
["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[IterTools]] +git-tree-sha1 = "05110a2ab1fc5f932622ffea2a003221f4782c18" +uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" +version = "1.3.0" + +[[IteratorInterfaceExtensions]] +git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" +uuid = "82899510-4779-5014-852e-03e436cf321d" +version = "1.0.0" + +[[JLLWrappers]] +deps = ["Preferences"] +git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e" +uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" +version = "1.3.0" + +[[JSON]] +deps = ["Dates", "Mmap", "Parsers", "Unicode"] +git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" +uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" +version = "0.21.1" + +[[JpegTurbo_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "9aff0587d9603ea0de2c6f6300d9f9492bbefbd3" +uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" +version = "2.0.1+3" + +[[LAME_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "df381151e871f41ee86cee4f5f6fd598b8a68826" +uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d" +version = "3.100.0+3" + +[[LZO_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6" +uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac" +version = "2.10.1+0" + +[[LaTeXStrings]] +git-tree-sha1 = "c7f1c695e06c01b95a67f0cd1d34994f3e7db104" +uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" +version = "1.2.1" + +[[Latexify]] +deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"] +git-tree-sha1 = "f77a16cb3804f4a74f57e5272a6a4a9a628577cb" +uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" +version = "0.15.5" + +[[LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" + +[[LibCURL_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" + +[[LibGit2]] +deps = 
["Base64", "NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" + +[[LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" + +[[LibVPX_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "85fcc80c3052be96619affa2fe2e6d2da3908e11" +uuid = "dd192d2f-8180-539f-9fb4-cc70b1dcf69a" +version = "1.9.0+1" + +[[Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[Libffi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "761a393aeccd6aa92ec3515e428c26bf99575b3b" +uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490" +version = "3.2.2+0" + +[[Libgcrypt_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"] +git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae" +uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4" +version = "1.8.7+0" + +[[Libglvnd_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"] +git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf" +uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29" +version = "1.3.0+3" + +[[Libgpg_error_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9" +uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8" +version = "1.42.0+0" + +[[Libiconv_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "8d22e127ea9a0917bc98ebd3755c8bd31989381e" +uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" +version = "1.16.1+0" + +[[Libmount_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73" +uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9" +version = "2.35.0+0" + +[[Libtiff_jll]] +deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"] +git-tree-sha1 = "291dd857901f94d683973cdf679984cdf73b56d0" +uuid = "89763e89-9b03-5906-acba-b20f662cd828" +version = "4.1.0+2" + +[[Libuuid_jll]] 
+deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066" +uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700" +version = "2.36.0+0" + +[[LinearAlgebra]] +deps = ["Libdl"] +uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" + +[[Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[MacroTools]] +deps = ["Markdown", "Random"] +git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0" +uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +version = "0.5.6" + +[[Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[MbedTLS]] +deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"] +git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe" +uuid = "739be429-bea8-5141-9913-cc70e7f3736d" +version = "1.0.3" + +[[MbedTLS_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" + +[[Measures]] +git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f" +uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" +version = "0.3.1" + +[[Missings]] +deps = ["DataAPI"] +git-tree-sha1 = "4ea90bd5d3985ae1f9a908bd4500ae88921c5ce7" +uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" +version = "1.0.0" + +[[Mmap]] +uuid = "a63ad114-7e13-5084-954f-fe012c677804" + +[[MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" + +[[Mustache]] +deps = ["Printf", "Tables"] +git-tree-sha1 = "36995ef0d532fe08119d70b2365b7b03d4e00f48" +uuid = "ffc61752-8dc7-55ee-8c37-f3e9cdd09e70" +version = "1.0.10" + +[[NaNMath]] +git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb" +uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" +version = "0.3.5" + +[[NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" + +[[Ogg_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "a42c0f138b9ebe8b58eba2271c5053773bde52d0" +uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" +version = "1.3.4+2" + +[[OpenSSL_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = 
"71bbbc616a1d710879f5a1021bcba65ffba6ce58" +uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" +version = "1.1.1+6" + +[[Opus_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "f9d57f4126c39565e05a2b0264df99f497fc6f37" +uuid = "91d4177d-7536-5919-b921-800302f37372" +version = "1.3.1+3" + +[[OrderedCollections]] +git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c" +uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" +version = "1.4.1" + +[[PCRE_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488" +uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc" +version = "8.44.0+0" + +[[Parsers]] +deps = ["Dates"] +git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "1.1.0" + +[[Pixman_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29" +uuid = "30392449-352a-5448-841d-b1acce4e97dc" +version = "0.40.1+0" + +[[Pkg]] +deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" + +[[PlotThemes]] +deps = ["PlotUtils", "Requires", "Statistics"] +git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d" +uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" +version = "2.0.1" + +[[PlotUtils]] +deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"] +git-tree-sha1 = "ae9a295ac761f64d8c2ec7f9f24d21eb4ffba34d" +uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" +version = "1.0.10" + +[[Plots]] +deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", 
"Statistics", "StatsBase", "UUIDs"] +git-tree-sha1 = "f3a57a5acc16a69c03539b3684354cbbbb72c9ad" +uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" +version = "1.15.2" + +[[Preferences]] +deps = ["TOML"] +git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.2.2" + +[[Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[Qt5Base_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"] +git-tree-sha1 = "16626cfabbf7206d60d84f2bf4725af7b37d4a77" +uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1" +version = "5.15.2+0" + +[[REPL]] +deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] +uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" + +[[Random]] +deps = ["Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[RecipesBase]] +git-tree-sha1 = "b3fb709f3c97bfc6e948be68beeecb55a0b340ae" +uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" +version = "1.1.1" + +[[RecipesPipeline]] +deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"] +git-tree-sha1 = "7a5026a6741c14147d1cb6daf2528a77ca28eb51" +uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" +version = "0.3.2" + +[[Reexport]] +git-tree-sha1 = "57d8440b0c7d98fc4f889e478e80f268d534c9d5" +uuid = "189a3867-3050-52da-a836-e630ba90ab69" +version = "1.0.0" + +[[Requires]] +deps = ["UUIDs"] +git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621" +uuid = "ae029012-a4dd-5104-9daa-d747884805df" +version = "1.1.3" + +[[SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" + +[[SciMLTutorials]] +deps = ["IJulia", "InteractiveUtils", "Pkg", "Plots", "Weave"] +git-tree-sha1 = "6d721be72323edd91679318c05aca8479bc7b20f" +uuid = "30cb0354-2223-46a9-baa0-41bdcfbe0178" 
+version = "0.9.0" + +[[Scratch]] +deps = ["Dates"] +git-tree-sha1 = "ad4b278adb62d185bbcb6864dc24959ab0627bf6" +uuid = "6c6a2e73-6563-6170-7368-637461726353" +version = "1.0.3" + +[[Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[SharedArrays]] +deps = ["Distributed", "Mmap", "Random", "Serialization"] +uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" + +[[Showoff]] +deps = ["Dates", "Grisu"] +git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de" +uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" +version = "1.0.3" + +[[Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[SoftGlobalScope]] +deps = ["REPL"] +git-tree-sha1 = "986ec2b6162ccb95de5892ed17832f95badf770c" +uuid = "b85f4697-e234-5449-a836-ec8e2f98b302" +version = "1.1.0" + +[[SortingAlgorithms]] +deps = ["DataStructures"] +git-tree-sha1 = "2ec1962eba973f383239da22e75218565c390a96" +uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" +version = "1.0.0" + +[[SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[StaticArrays]] +deps = ["LinearAlgebra", "Random", "Statistics"] +git-tree-sha1 = "c635017268fd51ed944ec429bcc4ad010bcea900" +uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "1.2.0" + +[[Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + +[[StatsAPI]] +git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510" +uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" +version = "1.0.0" + +[[StatsBase]] +deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] +git-tree-sha1 = "2f6792d523d7448bbe2fec99eca9218f06cc746d" +uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" +version = "0.33.8" + +[[StructArrays]] +deps = ["Adapt", "DataAPI", "Tables"] +git-tree-sha1 = "44b3afd37b17422a62aea25f04c1f7e09ce6b07f" +uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" +version = "0.5.1" + +[[TOML]] +deps = 
["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" + +[[TableTraits]] +deps = ["IteratorInterfaceExtensions"] +git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39" +uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" +version = "1.0.1" + +[[Tables]] +deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"] +git-tree-sha1 = "c9d2d262e9a327be1f35844df25fe4561d258dc9" +uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" +version = "1.4.2" + +[[Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" + +[[Test]] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[URIs]] +git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355" +uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" +version = "1.3.0" + +[[UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" + +[[Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" + +[[VersionParsing]] +git-tree-sha1 = "80229be1f670524750d905f8fc8148e5a8c4537f" +uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" +version = "1.2.0" + +[[Wayland_jll]] +deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"] +git-tree-sha1 = "dc643a9b774da1c2781413fd7b6dcd2c56bb8056" +uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89" +version = "1.17.0+4" + +[[Wayland_protocols_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll"] +git-tree-sha1 = "2839f1c1296940218e35df0bbb220f2a79686670" +uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91" +version = "1.18.0+4" + +[[Weave]] +deps = ["Base64", "Dates", "Highlights", "JSON", "Markdown", "Mustache", "Pkg", "Printf", "REPL", "Requires", "Serialization", "YAML"] +git-tree-sha1 = "4afd286cd80d1c2c338f9a13356298feac7348d0" +uuid = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" +version = "0.10.8" + +[[XML2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = 
"1acf5bdf07aa0907e0a37d3718bb88d4b687b74a" +uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" +version = "2.9.12+0" + +[[XSLT_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"] +git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a" +uuid = "aed1982a-8fda-507f-9586-7b0439959a61" +version = "1.1.34+0" + +[[Xorg_libX11_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] +git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527" +uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc" +version = "1.6.9+4" + +[[Xorg_libXau_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e" +uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec" +version = "1.0.9+4" + +[[Xorg_libXcursor_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"] +git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd" +uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724" +version = "1.2.0+4" + +[[Xorg_libXdmcp_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4" +uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05" +version = "1.1.3+4" + +[[Xorg_libXext_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3" +uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3" +version = "1.3.4+4" + +[[Xorg_libXfixes_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4" +uuid = "d091e8ba-531a-589c-9de9-94069b037ed8" +version = "5.0.3+4" + +[[Xorg_libXi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"] +git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246" +uuid = "a51aa0fd-4e3c-5386-b890-e753decda492" +version = "1.7.10+4" + 
+[[Xorg_libXinerama_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"] +git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123" +uuid = "d1454406-59df-5ea1-beac-c340f2130bc3" +version = "1.1.4+4" + +[[Xorg_libXrandr_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"] +git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631" +uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484" +version = "1.5.2+4" + +[[Xorg_libXrender_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96" +uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" +version = "0.9.10+4" + +[[Xorg_libpthread_stubs_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb" +uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74" +version = "0.1.0+3" + +[[Xorg_libxcb_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] +git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6" +uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b" +version = "1.13.0+3" + +[[Xorg_libxkbfile_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2" +uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a" +version = "1.1.0+4" + +[[Xorg_xcb_util_image_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97" +uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b" +version = "0.4.0+1" + +[[Xorg_xcb_util_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"] +git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1" +uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5" +version = "0.4.0+1" + +[[Xorg_xcb_util_keysyms_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] 
+git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00" +uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7" +version = "0.4.0+1" + +[[Xorg_xcb_util_renderutil_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e" +uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e" +version = "0.3.9+1" + +[[Xorg_xcb_util_wm_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67" +uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361" +version = "0.4.1+1" + +[[Xorg_xkbcomp_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"] +git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b" +uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4" +version = "1.4.2+4" + +[[Xorg_xkeyboard_config_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"] +git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d" +uuid = "33bec58e-1273-512f-9401-5d533626f822" +version = "2.27.0+4" + +[[Xorg_xtrans_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845" +uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10" +version = "1.4.0+3" + +[[YAML]] +deps = ["Base64", "Dates", "Printf"] +git-tree-sha1 = "78c02bd295bbd0ca330f95e07ccdfcb69f6cbcd4" +uuid = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" +version = "0.4.6" + +[[ZMQ]] +deps = ["FileWatching", "Sockets", "ZeroMQ_jll"] +git-tree-sha1 = "fc68e8a3719166950a0f3e390a14c7302c48f8de" +uuid = "c2297ded-f4af-51ae-bb23-16f91089e4e1" +version = "1.2.1" + +[[ZeroMQ_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "libsodium_jll"] +git-tree-sha1 = "74a74a3896b63980734cc876da8a103454559fe8" +uuid = "8f1865be-045e-5c20-9c9f-bfbfb0764568" +version = "4.3.2+6" + +[[Zlib_jll]] +deps = ["Libdl"] +uuid = "83775a58-1f1d-513f-b197-d71354ab007a" + +[[Zstd_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 
+git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6" +uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" +version = "1.5.0+0" + +[[libass_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "acc685bcf777b2202a904cdcb49ad34c2fa1880c" +uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" +version = "0.14.0+4" + +[[libfdk_aac_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "7a5780a0d9c6864184b3a2eeeb833a0c871f00ab" +uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280" +version = "0.1.6+4" + +[[libpng_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c" +uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f" +version = "1.6.38+0" + +[[libsodium_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "848ab3d00fe39d6fbc2a8641048f8f272af1c51e" +uuid = "a9144af2-ca23-56d9-984f-0d03f7b5ccf8" +version = "1.0.20+0" + +[[libvorbis_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"] +git-tree-sha1 = "fa14ac25af7a4b8a7f61b287a124df7aab601bcd" +uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a" +version = "1.3.6+6" + +[[nghttp2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" + +[[p7zip_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" + +[[x264_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "d713c1ce4deac133e3334ee12f4adff07f81778f" +uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a" +version = "2020.7.14+2" + +[[x265_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "487da2f8f2f0c8ee0e83f39d13037d6bbf0a45ab" +uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76" +version = "3.0.0+3" + +[[xkbcommon_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"] +git-tree-sha1 = 
"ece2350174195bb31de1a63bea3a41ae1aa593b6" +uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd" +version = "0.9.1+5" diff --git a/tutorials/Testing/Project.toml b/tutorials/Testing/Project.toml new file mode 100644 index 00000000..9c4e0a35 --- /dev/null +++ b/tutorials/Testing/Project.toml @@ -0,0 +1,5 @@ +[deps] +SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" + +[compat] +SciMLTutorials = "0.9, 1" diff --git a/tutorials/Testing/test.jmd b/tutorials/Testing/test.jmd new file mode 100644 index 00000000..4a909381 --- /dev/null +++ b/tutorials/Testing/test.jmd @@ -0,0 +1,11 @@ +--- +title: Test +author: Chris Rackauckas +--- + +This is a test of the builder system. It often gets bumped manually. + +```julia, echo = false, skip="notebook" +using SciMLTutorials +SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder], WEAVE_ARGS[:file]) +``` diff --git a/tutorials/advanced/beeler_reuter.jmd b/tutorials/advanced/beeler_reuter.jmd deleted file mode 100644 index c07b64ab..00000000 --- a/tutorials/advanced/beeler_reuter.jmd +++ /dev/null @@ -1,661 +0,0 @@ ---- -title: An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model -author: Shahriar Iravanian ---- - -## Background - -[JuliaDiffEq](https://github.com/JuliaDiffEq) is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). *JuliaDiffEq* provides a large number of explicit and implicit solvers suited for different types of ODE problems. It is possible to reduce a system of partial differential equations into an ODE problem by employing the [method of lines (MOL)](https://en.wikipedia.org/wiki/Method_of_lines). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. 
[Solving Systems of Stochastic PDEs and using GPUs in Julia](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/) is a brief introduction to MOL and using GPUs to accelerate PDE solving in *JuliaDiffEq*. Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl) libraries to run the explicit part of the model on a GPU. - -Note that this tutorial does not use the [higher order IMEX methods built into DifferentialEquations.jl](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1) but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios. - -There are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic [Hodgkin-Huxley model](https://en.wikipedia.org/wiki/Hodgkin%E2%80%93Huxley_model) and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE, - -$$\partial V / \partial t = \nabla (D \nabla V) - \frac {I_\text{ion}} {C_m},$$ - -where $V$ is the transmembrane potential, $D$ is a diffusion tensor, $I_\text{ion}$ is the sum of the transmembrane currents and is calculated from the ODEs, and $C_m$ is the membrane capacitance and is usually assumed to be constant. Here we model a uniform and isotropic medium. 
Therefore, the model can be simplified to, - -$$\partial V / \partial t = D \Delta{V} - \frac {I_\text{ion}} {C_m},$$ - -where $D$ is now a scalar. By nature, these models have to deal with different time scales and are therefore classified as *stiff*. Commonly, they are solved using the explicit Euler method, usually with a closed form for the integration of the gating variables (the Rush-Larsen method, see below). We can also solve these problems using implicit or semi-implicit PDE solvers (e.g., the [Crank-Nicholson method](https://en.wikipedia.org/wiki/Crank%E2%80%93Nicolson_method) combined with an iterative solver). Higher order explicit methods such as Runge-Kutta and linear multi-step methods cannot overcome the stiffness and are not particularly helpful. - -In this tutorial, we first develop a CPU-only IMEX solver and then show how to move the explicit part to a GPU. - -### The Beeler-Reuter Model - -We have chosen the [Beeler-Reuter ventricular ionic model](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1283659/) as our example. It is a classic model first described in 1977 and is used as a base for many other ionic models. It has eight state variables, which makes it complicated enough to be interesting without obscuring the main points of the exercise. The eight state variables are: the transmembrane potential ($V$), sodium-channel activation and inactivation gates ($m$ and $h$, similar to the Hodgkin-Huxley model), with an additional slow inactivation gate ($j$), calcium-channel activation and deactivations gates ($d$ and $f$), a time-dependent inward-rectifying potassium current gate ($x_1$), and intracellular calcium concentration ($c$). There are four currents: a sodium current ($i_{Na}$), a calcium current ($i_{Ca}$), and two potassium currents, one time-dependent ($i_{x_1}$) and one background time-independent ($i_{K_1}$). - -## CPU-Only Beeler-Reuter Solver - -Let's start by developing a CPU only IMEX solver. 
The main idea is to use the *DifferentialEquations* framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. If no analytical approximation was known for the explicit part, one could use methods from [this list](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1). - -First, we define the model constants: - -```julia -const v0 = -84.624 -const v1 = 10.0 -const C_K1 = 1.0f0 -const C_x1 = 1.0f0 -const C_Na = 1.0f0 -const C_s = 1.0f0 -const D_Ca = 0.0f0 -const D_Na = 0.0f0 -const g_s = 0.09f0 -const g_Na = 4.0f0 -const g_NaC = 0.005f0 -const ENa = 50.0f0 + D_Na -const γ = 0.5f0 -const C_m = 1.0f0 -``` - -Note that the constants are defined as `Float32` and not `Float64`. The reason is that most GPUs have many more single precision cores than double precision ones. To ensure uniformity between CPU and GPU, we also code most states variables as `Float32` except for the transmembrane potential, which is solved by an implicit solver provided by the Sundial library and needs to be `Float64`. - -### The State Structure - -Next, we define a struct to contain our state. `BeelerReuterCpu` is a functor and we will define a deriv function as its associated function. 
- -```julia -mutable struct BeelerReuterCpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - C::Array{Float32, 2} # intracellular calcium concentration - M::Array{Float32, 2} # sodium current activation gate (m) - H::Array{Float32, 2} # sodium current inactivation gate (h) - J::Array{Float32, 2} # sodium current slow inactivaiton gate (j) - D::Array{Float32, 2} # calcium current activaiton gate (d) - F::Array{Float32, 2} # calcium current inactivation gate (f) - XI::Array{Float32, 2} # inward-rectifying potassium current (iK1) - - Δu::Array{Float64, 2} # place-holder for the Laplacian - - function BeelerReuterCpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.C = fill(0.0001f0, (ny,nx)) - self.M = fill(0.01f0, (ny,nx)) - self.H = fill(0.988f0, (ny,nx)) - self.J = fill(0.975f0, (ny,nx)) - self.D = fill(0.003f0, (ny,nx)) - self.F = fill(0.994f0, (ny,nx)) - self.XI = fill(0.0001f0, (ny,nx)) - - self.Δu = zeros(ny,nx) - - return self - end -end -``` - -### Laplacian - -The finite-difference Laplacian is calculated in-place by a 5-point stencil. The Neumann boundary condition is enforced. Note that we could have also used [DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to automate this step. 
- -```julia -# 5-point stencil -function laplacian(Δu, u) - n1, n2 = size(u) - - # internal nodes - for j = 2:n2-1 - for i = 2:n1-1 - @inbounds Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j] - end - end - - # left/right edges - for i = 2:n1-1 - @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1] - @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2] - end - - # top/bottom edges - for j = 2:n2-1 - @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j] - @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j] - end - - # corners - @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1] - @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1] - @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2] - @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2] -end -``` - -### The Rush-Larsen Method - -We use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the [IMEX solvers documentation](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-%28IMEX%29-ODE-1). While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest. - -The [Rush-Larsen](https://ieeexplore.ieee.org/document/4122859/) method replaces the explicit Euler integration for the gating variables with direct integration. 
The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs, - -$$\frac{dg}{dt} = \alpha(V) (1 - g) - \beta(V) g$$ - -where $g$ is a generic gating variable, ranging from 0 to 1, and $\alpha$ and $\beta$ are reaction rates. This equation can be written as, - -$$\frac{dg}{dt} = (g_{\infty} - g) / \tau_g,$$ - -where $g_\infty$ and $\tau_g$ are - -$$g_{\infty} = \frac{\alpha}{(\alpha + \beta)},$$ - -and, - -$$\tau_g = \frac{1}{(\alpha + \beta)}.$$ - -Assuming that $g_\infty$ and $\tau_g$ are constant for the duration of a single time step ($\Delta{t}$), which is a reasonable assumption for most cardiac models, we can integrate directly to have, - -$$g(t + \Delta{t}) = g_{\infty} - \left(g_{\infty} - g(t)\right)\,e^{-\Delta{t}/\tau_g}.$$ - -This is the Rush-Larsen technique. Note that as $\Delta{t} \rightarrow 0$, this equation morphs into the explicit Euler formula, - -$$g(t + \Delta{t}) = g(t) + \Delta{t}\frac{dg}{dt}.$$ - -`rush_larsen` is a helper function that uses the Rush-Larsen method to integrate the gating variables. - -```julia -@inline function rush_larsen(g, α, β, Δt) - inf = α/(α+β) - τ = 1f0 / (α+β) - return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0) -end -``` - -The gating variables are updated as below. The details of how to calculate $\alpha$ and $\beta$ are based on the Beeler-Reuter model and not of direct interest to this tutorial. - -```julia -function update_M_cpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ?
10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * exp(-0.056f0*(v+72.0f0))) - return rush_larsen(g, α, β, Δt) -end - -function update_H_cpu(g, v, Δt) - α = 0.126f0 * exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_J_cpu(g, v, Δt) - α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_D_cpu(g, v, Δt) - α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_F_cpu(g, v, Δt) - α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_XI_cpu(g, v, Δt) - α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end -``` - -The intracelleular calcium is not technically a gating variable, but we can use a similar explicit exponential integrator for it. - -```julia -function update_C_cpu(g, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - g) - τ = 1f0 / 0.07f0 - return g + (g - inf) * expm1(-Δt/τ) -end -``` - -### Implicit Solver - -Now, it is time to define the derivative function as an associated function of **BeelerReuterCpu**. We plan to use the CVODE_BDF solver as our implicit portion. Similar to other iterative methods, it calls the deriv function with the same $t$ multiple times. 
For example, these are consecutive $t$s from a representative run: - -0.86830 -0.86830 -0.85485 -0.85485 -0.85485 -0.86359 -0.86359 -0.86359 -0.87233 -0.87233 -0.87233 -0.88598 -... - -Here, every time step is called three times. We distinguish between two types of calls to the deriv function. When $t$ changes, the gating variables are updated by calling `update_gates_cpu`: - -```julia -function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt) - let Δt = Float32(Δt) - n1, n2 = size(u) - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - XI[i,j] = update_XI_cpu(XI[i,j], v, Δt) - M[i,j] = update_M_cpu(M[i,j], v, Δt) - H[i,j] = update_H_cpu(H[i,j], v, Δt) - J[i,j] = update_J_cpu(J[i,j], v, Δt) - D[i,j] = update_D_cpu(D[i,j], v, Δt) - F[i,j] = update_F_cpu(F[i,j], v, Δt) - - C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - end - end -end -``` - -On the other hand, du is updated at each time step, since it is independent of $\Delta{t}$. - -```julia -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = exp(0.04f0*(v+85f0)) - eb = exp(0.08f0*(v+53f0)) - ec = exp(0.04f0*(v+53f0)) - ed = exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 
25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = exp(0.04f0*(v+77f0)) - eb = exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end - -function update_du_cpu(du, u, XI, M, H, J, D, F, C) - n1, n2 = size(u) - - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - end - end -end -``` - -Finally, we put everything together is our deriv function, which is a call on `BeelerReuterCpu`. - -```julia -function (f::BeelerReuterCpu)(du, u, p, t) - Δt = t - f.t - - if Δt != 0 || t == 0 - update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt) - f.t = t - end - - laplacian(f.Δu, u) - - # calculate the reaction portion - update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δu -end -``` - -### Results - -Time to test! We need to define the starting transmembrane potential with the help of global constants **v0** and **v1**, which represent the resting and activated potentials. - -```julia -const N = 192; -u0 = fill(v0, (N, N)); -u0[90:102,90:102] .= v1; # a small square in the middle of the domain -``` - -The initial condition is a small square in the middle of the domain. 
- -```julia -using Plots -heatmap(u0) -``` - -Next, the problem is defined: - -```julia -using DifferentialEquations, Sundials - -deriv_cpu = BeelerReuterCpu(u0, 1.0); -prob = ODEProblem(deriv_cpu, u0, (0.0, 50.0)); -``` - -For stiff reaction-diffusion equations, CVODE_BDF from the Sundials library is an excellent solver. - -```julia -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); -``` - -```julia -heatmap(sol.u[end]) -``` - -## CPU/GPU Beeler-Reuter Solver - -GPUs are great for embarrassingly parallel problems but not so much for highly coupled models. We plan to keep the implicit part on CPU and run the decoupled explicit code on a GPU with the help of the CUDAnative library. - -### GPUs and CUDA - -In this section, we present a brief summary of how GPUs (specifically NVIDIA GPUs) work and how to program them using the Julia CUDA interface. The readers who are familiar with these basic concepts may skip this section. - -Let's start by looking at the hardware of a typical high-end GPU, GTX 1080. It has four Graphics Processing Clusters (equivalent to a discrete CPU), each harboring five Streaming Multiprocessors (similar to a CPU core). Each SM has 128 single-precision CUDA cores. Therefore, GTX 1080 has a total of 4 x 5 x 128 = 2560 CUDA cores. The maximum theoretical throughput for a GTX 1080 is reported as 8.87 TFLOPS. This figure is calculated for a boost clock frequency of 1.733 GHz as 2 x 2560 x 1.733 GHz = 8.87 TFLOPS. The factor 2 is included because two single floating point operations, a multiplication and an addition, can be done in a clock cycle as part of a fused-multiply-addition FMA operation. GTX 1080 also has 8192 MB of global memory accessible to all the cores (in addition to local and shared memory on each SM). - -A typical CUDA application has the following flow: - -1. Define and initialize the problem domain tensors (multi-dimensional arrays) in CPU memory. -2. Allocate corresponding tensors in the GPU global memory. -3.
Transfer the input tensors from CPU to the corresponding GPU tensors. -4. Invoke CUDA kernels (i.e., the GPU functions callable from CPU) that operate on the GPU tensors. -5. Transfer the result tensors from GPU back to CPU. -6. Process tensors on CPU. -7. Repeat steps 3-6 as needed. - -Some libraries, such as [ArrayFire](https://github.com/arrayfire/arrayfire), hide the complexities of steps 2-5 behind a higher level of abstraction. However, here we take a lower level route. By using [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl), we achieve a finer-grained control and higher performance. In return, we need to implement each step manually. - -*CuArray* is a thin abstraction layer over the CUDA API and allows us to define GPU-side tensors and copy data to and from them but does not provide for operations on tensors. *CUDAnative* is a compiler that translates Julia functions designated as CUDA kernels into ptx (a high-level CUDA assembly language). - -### The CUDA Code - -The key to fast CUDA programs is to minimize CPU/GPU memory transfers and global memory accesses. The implicit solver is currently CPU only, but it only needs access to the transmembrane potential. The rest of the state variables reside on the GPU memory. - -We modify ``BeelerReuterCpu`` into ``BeelerReuterGpu`` by defining the state variables as *CuArray*s instead of standard Julia *Array*s. The name of each variable defined on GPU is prefixed by *d_* for clarity. Note that $\Delta{v}$ is a temporary storage for the Laplacian and stays on the CPU side.
- -```julia -using CUDAnative, CuArrays - -mutable struct BeelerReuterGpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - d_C::CuArray{Float32, 2} # intracellular calcium concentration - d_M::CuArray{Float32, 2} # sodium current activation gate (m) - d_H::CuArray{Float32, 2} # sodium current inactivation gate (h) - d_J::CuArray{Float32, 2} # sodium current slow inactivaiton gate (j) - d_D::CuArray{Float32, 2} # calcium current activaiton gate (d) - d_F::CuArray{Float32, 2} # calcium current inactivation gate (f) - d_XI::CuArray{Float32, 2} # inward-rectifying potassium current (iK1) - - d_u::CuArray{Float64, 2} # place-holder for u in the device memory - d_du::CuArray{Float64, 2} # place-holder for d_u in the device memory - - Δv::Array{Float64, 2} # place-holder for voltage gradient - - function BeelerReuterGpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - @assert (nx % 16 == 0) && (ny % 16 == 0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.d_C = CuArray(fill(0.0001f0, (ny,nx))) - self.d_M = CuArray(fill(0.01f0, (ny,nx))) - self.d_H = CuArray(fill(0.988f0, (ny,nx))) - self.d_J = CuArray(fill(0.975f0, (ny,nx))) - self.d_D = CuArray(fill(0.003f0, (ny,nx))) - self.d_F = CuArray(fill(0.994f0, (ny,nx))) - self.d_XI = CuArray(fill(0.0001f0, (ny,nx))) - - self.d_u = CuArray(u0) - self.d_du = CuArray(zeros(ny,nx)) - - self.Δv = zeros(ny,nx) - - return self - end -end -``` - -The Laplacian function remains unchanged. The main change to the explicit gating solvers is that *exp* and *expm1* functions are prefixed by *CUDAnative.*. This is a technical nuisance that will hopefully be resolved in future. 
- -```julia -function rush_larsen_gpu(g, α, β, Δt) - inf = α/(α+β) - τ = 1.0/(α+β) - return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0) -end - -function update_M_gpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0))) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_H_gpu(g, v, Δt) - α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_J_gpu(g, v, Δt) - α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_D_gpu(g, v, Δt) - α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_F_gpu(g, v, Δt) - α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_XI_gpu(g, v, Δt) - α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_C_gpu(c, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - c) - τ = 1f0 / 0.07f0 - return c + (c - inf) * CUDAnative.expm1(-Δt/τ) -end -``` - -Similarly, we modify the 
functions to calculate the individual currents by adding CUDAnative prefix. - -```julia -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = CUDAnative.exp(0.04f0*(v+85f0)) - eb = CUDAnative.exp(0.08f0*(v+53f0)) - ec = CUDAnative.exp(0.04f0*(v+53f0)) - ed = CUDAnative.exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = CUDAnative.exp(0.04f0*(v+77f0)) - eb = CUDAnative.exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end -``` - -### CUDA Kernels - -A CUDA program does not directly deal with GPCs and SMs. The logical view of a CUDA program is in the term of *blocks* and *threads*. We have to specify the number of block and threads when running a CUDA *kernel*. Each thread runs on a single CUDA core. Threads are logically bundled into blocks, which are in turn specified on a grid. The grid stands for the entirety of the domain of interest. - -Each thread can find its logical coordinate by using few pre-defined indexing variables (*threadIdx*, *blockIdx*, *blockDim* and *gridDim*) in C/C++ and the corresponding functions (e.g., `threadIdx()`) in Julia. There variables and functions are defined automatically for each thread and may return a different value depending on the calling thread. 
The return value of these functions is a 1, 2, or 3 dimensional structure whose elements can be accessed as `.x`, `.y`, and `.z` (for a 1-dimensional case, `.x` reports the actual index and `.y` and `.z` simply return 1). For example, if we deploy a kernel with 128 blocks and 256 threads per block, each thread will see - -``` - gridDim.x = 128; - blockDim.x = 256; -``` - -while `blockIdx.x` ranges from 0 to 127 in C/C++ and 1 to 128 in Julia. Similarly, `threadIdx.x` will be between 0 to 255 in C/C++ (of course, in Julia the range will be 1 to 256). - -A C/C++ thread can calculate its index as - -``` - int idx = blockDim.x * blockIdx.x + threadIdx.x; -``` - -In Julia, we have to take into account base 1. Therefore, we use the following formula - -``` - idx = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x -``` - -A CUDA programmer is free to interpret the calculated index however it fits the application, but in practice, it is usually interpreted as an index into input tensors. - -In the GPU version of the solver, each thread works on a single element of the medium, indexed by a (x,y) pair. -`update_gates_gpu` and `update_du_gpu` are very similar to their CPU counterparts but are in fact CUDA kernels where the *for* loops are replaced with CUDA specific indexing. Note that CUDA kernels cannot return a value; hence, *nothing* at the end.
- -```julia -function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - let Δt = Float32(Δt) - XI[i,j] = update_XI_gpu(XI[i,j], v, Δt) - M[i,j] = update_M_gpu(M[i,j], v, Δt) - H[i,j] = update_H_gpu(H[i,j], v, Δt) - J[i,j] = update_J_gpu(J[i,j], v, Δt) - D[i,j] = update_D_gpu(D[i,j], v, Δt) - F[i,j] = update_F_gpu(F[i,j], v, Δt) - - C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - nothing -end - -function update_du_gpu(du, u, XI, M, H, J, D, F, C) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - nothing -end -``` - -### Implicit Solver - -Finally, the deriv function is modified to copy *u* to GPU and copy *du* back and to invoke CUDA kernels. - -```julia -function (f::BeelerReuterGpu)(du, u, p, t) - L = 16 # block size - Δt = t - f.t - copyto!(f.d_u, u) - ny, nx = size(u) - - if Δt != 0 || t == 0 - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu( - f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt) - f.t = t - end - - laplacian(f.Δv, u) - - # calculate the reaction portion - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu( - f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C) - - copyto!(du, f.d_du) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δv -end -``` - -Ready to test! 
- -```julia -using DifferentialEquations, Sundials - -deriv_gpu = BeelerReuterGpu(u0, 1.0); -prob = ODEProblem(deriv_gpu, u0, (0.0, 50.0)); -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); -``` - -```julia -heatmap(sol.u[end]) -``` - -## Summary - -We achieve around a 6x speedup with running the explicit portion of our IMEX solver on a GPU. The major bottleneck of this technique is the communication between CPU and GPU. In its current form, not all of the internals of the method utilize GPU acceleration. In particular, the implicit equations solved by GMRES are performed on the CPU. This partial CPU nature also increases the amount of data transfer that is required between the GPU and CPU (performed every f call). Compiling the full ODE solver to the GPU would solve both of these issues and potentially give a much larger speedup. [JuliaDiffEq developers are currently working on solutions to alleviate these issues](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/), but these will only be compatible with native Julia solvers (and not Sundials). - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/callbacks_and_events.jmd b/tutorials/introduction/callbacks_and_events.jmd deleted file mode 100644 index b1b98050..00000000 --- a/tutorials/introduction/callbacks_and_events.jmd +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: Callbacks and Events -author: Chris Rackauckas ---- - -In working with a differential equation, our system will evolve through many states. Particular states of the system may be of interest to us, and we say that an ***"event"*** is triggered when our system reaches these states. For example, events may include the moment when our system reaches a particular temperature or velocity. 
We ***handle*** these events with ***callbacks***, which tell us what to do once an event has been triggered. - -These callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserve conservation laws and save the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers. - -This tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the [Event Handling and Callback Functions](http://docs.juliadiffeq.org/latest/features/callback_functions.html) page of the documentation. We will also introduce you to some of the most widely used callbacks in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html), which is a library of pre-built mods. - -## Events and Continuous Callbacks - -Event handling is done through continuous callbacks. Callbacks take a function, `condition`, which triggers an `affect!` when `condition == 0`. These callbacks are called "continuous" because they will utilize rootfinding on the interpolation to find the "exact" time point at which the condition takes place and apply the `affect!` at that time point. - -***Let's use a bouncing ball as a simple system to explain events and callbacks.*** Let's take Newton's model of a ball falling towards the Earth's surface via a gravitational constant `g`. In this case, the velocity is changing via `-g`, and position is changing via the velocity. Therefore we receive the system of ODEs: - -```julia -using DifferentialEquations, ParameterizedFunctions -ball! = @ode_def BallBounce begin - dy = v - dv = -g -end g -``` - -We want the callback to trigger when `y=0` since that's when the ball will hit the Earth's surface (our event). 
We do this with the condition: - -```julia -function condition(u,t,integrator) - u[1] -end -``` - -Recall that the `condition` will trigger when it evaluates to zero, and here it will evaluate to zero when `u[1] == 0`, which occurs when `y == 0`. *Now we have to say what we want the callback to do.* Callbacks make use of the [Integrator Interface](http://docs.juliadiffeq.org/latest/basics/integrator.html). Instead of giving a full description, a quick and usable rundown is: - -- Values are stored in `integrator.u` -- Times are stored in `integrator.t` -- The parameters are stored in `integrator.p` -- `integrator(t)` performs an interpolation in the current interval between `integrator.tprev` and `integrator.t` (and allows extrapolation) -- User-defined options (tolerances, etc.) are stored in `integrator.opts` -- `integrator.sol` is the current solution object. Note that `integrator.sol.prob` is the current problem - -While there's a lot more on the integrator interface page, that's a working knowledge of what's there. - -What we want to do with our `affect!` is to "make the ball bounce". Mathematically speaking, the ball bounces when the sign of the velocity flips. As an added behavior, let's also use a small friction constant to dampen the ball's velocity. This way only a percentage of the velocity will be retained when the event is triggered and the callback is used. We'll define this behavior in the `affect!` function: - -```julia -function affect!(integrator) - integrator.u[2] = -integrator.p[2] * integrator.u[2] -end -``` - -`integrator.u[2]` is the second value of our model, which is `v` or velocity, and `integrator.p[2]`, is our friction coefficient. - -Therefore `affect!` can be read as follows: `affect!` will take the current value of velocity, and multiply it by `-1` times our friction coefficient. Therefore the ball will change direction and its velocity will dampen when `affect!` is called.
- -Now let's build the `ContinuousCallback`: - -```julia -bounce_cb = ContinuousCallback(condition,affect!) -``` - -Now let's make an `ODEProblem` which has our callback: - -```julia -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb) -``` - -Notice that we chose a friction constant of `0.9`. Now we can solve the problem and plot the solution as we normally would: - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -and tada, the ball bounces! Notice that the `ContinuousCallback` is using the interpolation to apply the effect "exactly" when `y == 0`. This is crucial for model correctness, and thus when this property is needed a `ContinuousCallback` should be used. - -#### Exercise 1 - -In our example we used a constant coefficient of friction, but if we are bouncing the ball in the same place we may be smoothing the surface (say, squishing the grass), causing there to be less friction after each bounce. In this more advanced model, we want the friction coefficient at the next bounce to be `sqrt(friction)` from the previous bounce (since `friction < 1`, `sqrt(friction) > friction` and `sqrt(friction) < 1`). - -Hint: there are many ways to implement this. One way to do it is to make `p` a `Vector` and mutate the friction coefficient in the `affect!`. - -## Discrete Callbacks - -A discrete callback checks a `condition` after every integration step and, if true, it will apply an `affect!`. For example, let's say that at time `t=2` we want to include that a kid kicked the ball, adding `50` to the current velocity. This kind of situation, where we want to add a specific behavior which does not require rootfinding, is a good candidate for a `DiscreteCallback`. In this case, the `condition` is a boolean for whether to apply the `affect!`, so: - -```julia -function condition_kick(u,t,integrator) - t == 2 -end -``` - -We want the kick to occur at `t=2`, so we check for that time point.
When we are at this time point, we want to do: - -```julia -function affect_kick!(integrator) - integrator.u[2] += 50 -end -``` - -Now we build the problem as before: - -```julia -kick_cb = DiscreteCallback(condition_kick,affect_kick!) -u0 = [50.0,0.0] -tspan = (0.0,10.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb) -``` - -Note that, since we are requiring our effect at exactly the time `t=2`, we need to tell the integration scheme to step at exactly `t=2` to apply this callback. This is done via the option `tstops`, which is like `saveat` but means "stop at these values". - -```julia -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) -``` - -Note that this example could've been done with a `ContinuousCallback` by checking the condition `t-2`. - -## Merging Callbacks with Callback Sets - -In some cases you may want to merge callbacks to build up more complex behavior. In our previous result, notice that the model is unphysical because the ball goes below zero! What we really need to do is add the bounce callback together with the kick. This can be achieved through the `CallbackSet`. - -```julia -cb = CallbackSet(bounce_cb,kick_cb) -``` - -A `CallbackSet` merges their behavior together. The logic is as follows. In a given interval, if there are multiple continuous callbacks that would trigger, only the one that triggers at the earliest time is used. The time is pulled back to where that continuous callback is triggered, and then the `DiscreteCallback`s in the callback set are called in order. - -```julia -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=cb) -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) -``` - -Notice that we have now merged the behaviors. We can then nest this as deep as we like. - -#### Exercise 2 - -Add to the model a linear wind with resistance that changes the acceleration to `-g + k*v` after `t=10`. 
Do so by adding another parameter and allowing it to be zero until a specific time point where a third callback triggers the change. - -## Integration Termination and Directional Handling - -Let's look at another model now: the model of the [Harmonic Oscillator](https://en.wikipedia.org/wiki/Harmonic_oscillator). We can write this as: - -```julia -u0 = [1.,0.] -harmonic! = @ode_def HarmonicOscillator begin - dv = -x - dx = v -end -tspan = (0.0,10.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -plot(sol) -``` - -Let's instead stop the integration when a condition is met. From the [Integrator Interface stepping controls](http://docs.juliadiffeq.org/latest/basics/integrator.html#Stepping-Controls-1) we see that `terminate!(integrator)` will cause the integration to end. So our new `affect!` is simply: - -```julia -function terminate_affect!(integrator) - terminate!(integrator) -end -``` - -Let's first stop the integration when the particle moves back to `x=0`. This means we want to use the condition: - -```julia -function terminate_condition(u,t,integrator) - u[2] -end -terminate_cb = ContinuousCallback(terminate_condition,terminate_affect!) -``` - -Note that instead of adding callbacks to the problem, we can also add them to the `solve` command. This will automatically form a `CallbackSet` with any problem-related callbacks and naturally allows you to distinguish between model features and integration controls. - -```julia -sol = solve(prob,callback=terminate_cb) -plot(sol) -``` - -Notice that the harmonic oscilator's true solution here is `sin` and `cosine`, and thus we would expect this return to zero to happen at `t=π`: - -```julia -sol.t[end] -``` - -This is one way to approximate π! Lower tolerances and arbitrary precision numbers can make this more exact, but let's not look at that. Instead, what if we wanted to halt the integration after exactly one cycle? To do so we would need to ignore the first zero-crossing. 
Luckily in these types of scenarios there's usually a structure to the problem that can be exploited. Here, we only want to trigger the `affect!` when crossing from positive to negative, and not when crossing from negative to positive. In other words, we want our `affect!` to only occur on upcrossings. - -If the `ContinuousCallback` constructor is given a single `affect!`, it will occur on both upcrossings and downcrossings. If there are two `affect!`s given, then the first is for upcrossings and the second is for downcrossings. An `affect!` can be ignored by using `nothing`. Together, the "upcrossing-only" version of the effect means that the first `affect!` is what we defined above and the second is `nothing`. Therefore we want: - -```julia -terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing) -``` - -Which gives us: - -```julia -sol = solve(prob,callback=terminate_upcrossing_cb) -plot(sol) -``` - -## Callback Library - -As you can see, callbacks can be very useful and through `CallbackSets` we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html). We will walk through a few examples where these callbacks can come in handy. - -### Manifold Projection - -One callback is the manifold projection callback. Essentially, you can define any manifold `g(sol)=0` which the solution must live on, and cause the integration to project to that manifold after every step. As an example, let's see what happens if we naively run the harmonic oscillator for a long time: - -```julia -tspan = (0.0,10000.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -gr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points! 
-plot(sol,vars=(1,2)) -``` - -```julia -plot(sol,vars=(0,1),denseplot=false) -``` - -Notice that what's going on is that the numerical solution is drifting from the true solution over this long time scale. This is because the integrator is not conserving energy. - -```julia -plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2 -``` - -Some integration techniques like [symplectic integrators](http://docs.juliadiffeq.org/latest/solvers/dynamical_solve.html#Symplectic-Integrators-1) are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is: - -```julia -function g(resid,u,p,t) - resid[1] = u[2]^2 + u[1]^2 - 1 - resid[2] = 0 -end -``` - -Here the residual measures how far from our desired energy we are, and the number of conditions matches the size of our system (we ignored the second one by making the residual 0). Thus we define a `ManifoldProjection` callback and add that to the solver: - -```julia -cb = ManifoldProjection(g) -sol = solve(prob,callback=cb) -plot(sol,vars=(1,2)) -``` - -```julia -plot(sol,vars=(0,1),denseplot=false) -``` - -Now we have "perfect" energy conservation, where if it's ever violated too much the solution will get projected back to `energy=1`. - -```julia -u1,u2 = sol[500] -u2^2 + u1^2 -``` - -While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the [`PositiveCallback()`](http://docs.juliadiffeq.org/latest/features/callback_library.html#PositiveDomain-1) which can be used to enforce positivity of the variables. - -### SavingCallback - -The `SavingCallback` can be used to allow for special saving behavior. 
Let's take a linear ODE define on a system of 1000x1000 matrices: - -```julia -prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0)) -``` - -In fields like quantum mechanics you may only want to know specific properties of the solution such as the trace or the norm of the matrix. Saving all of the 1000x1000 matrices can be a costly way to get this information! Instead, we can use the `SavingCallback` to save the `trace` and `norm` at specified times. To do so, we first define our `SavedValues` cache. Our time is in terms of `Float64`, and we want to save tuples of `Float64`s (one for the `trace` and one for the `norm`), and thus we generate the cache as: - -```julia -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) -``` - -Now we define the `SavingCallback` by giving it a function of `(u,p,t,integrator)` that returns the values to save, and the cache: - -```julia -using LinearAlgebra -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values) -``` - -Here we take `u` and save `(tr(u),norm(u))`. When we solve with this callback: - -```julia -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving -``` - -Our values are stored in our `saved_values` variable: - -```julia -saved_values.t -``` - -```julia -saved_values.saveval -``` - -By default this happened only at the solver's steps. But the `SavingCallback` has similar controls as the integrator. For example, if we want to save at every `0.1` seconds, we do can so using `saveat`: - -```julia -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0) -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving -``` - -```julia -saved_values.t -``` - -```julia -saved_values.saveval -``` - -#### Exercise 3 - -Go back to the Harmonic oscillator. 
Use the `SavingCallback` to save an array for the energy over time, and do this both with and without the `ManifoldProjection`. Plot the results to see the difference the projection makes. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/choosing_algs.jmd b/tutorials/introduction/choosing_algs.jmd deleted file mode 100644 index 55525b8b..00000000 --- a/tutorials/introduction/choosing_algs.jmd +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Choosing an ODE Algorithm -author: Chris Rackauckas ---- - -While the default algorithms, along with `alg_hints = [:stiff]`, will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the [ODE Solvers](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html) page which goes into more depth. - -## Diagnosing Stiffness - -One of the key things to know for algorithm choices is whether your problem is stiff. Let's take for example the driven Van Der Pol equation: - -```julia -using DifferentialEquations, ParameterizedFunctions -van! = @ode_def VanDerPol begin - dy = μ*((1-x^2)*y - x) - dx = 1*y -end μ - -prob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6) -``` - -One indicating factor that should alert you to the fact that this model may be stiff is the fact that the parameter is `1e6`: large parameters generally mean stiff models. If we try to solve this with the default method: - -```julia -sol = solve(prob,Tsit5()) -``` - -Here it shows that maximum iterations were reached. Another thing that can happen is that the solution can return that the solver was unstable (exploded to infinity) or that `dt` became too small. If these happen, the first thing to do is to check that your model is correct. 
It could very well be that you made an error that causes the model to be unstable! - -If the model is the problem, then stiffness could be the reason. We can thus hint to the solver to use an appropriate method: - -```julia -sol = solve(prob,alg_hints = [:stiff]) -``` - -Or we can use the default algorithm. By default, DifferentialEquations.jl uses algorithms like `AutoTsit5(Rodas5())` which automatically detect stiffness and switch to an appropriate method once stiffness is known. - -```julia -sol = solve(prob) -``` - -Another way to understand stiffness is to look at the solution. - -```julia -using Plots; gr() -sol = solve(prob,alg_hints = [:stiff],reltol=1e-6) -plot(sol,denseplot=false) -``` - -Let's zoom in on the y-axis to see what's going on: - -```julia -plot(sol,ylims = (-10.0,10.0)) -``` - -Notice how there are some extreme vertical shifts that occur. These vertical shifts are places where the derivative term is very large, and this is indicative of stiffness. This is an extreme example to highlight the behavior, but this general idea can be carried over to your problem. When in doubt, simply try timing using both a stiff solver and a non-stiff solver and see which is more efficient. - -To try this out, let's use BenchmarkTools, a package that let's us relatively reliably time code blocks. - -```julia -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -u0 = [1.0,0.0,0.0] -p = (10,28,8/3) -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) -``` - -And now, let's use the `@btime` macro from benchmark tools to compare the use of non-stiff and stiff solvers on this problem. - -```julia -using BenchmarkTools -@btime solve(prob); -``` - -```julia -@btime solve(prob,alg_hints = [:stiff]); -``` - -In this particular case, we can see that non-stiff solvers get us to the solution much more quickly. 
- -## The Recommended Methods - -When picking a method, the general rules are as follows: - -- Higher order is more efficient at lower tolerances, lower order is more efficient at higher tolerances -- Adaptivity is essential in most real-world scenarios -- Runge-Kutta methods do well with non-stiff equations, Rosenbrock methods do well with small stiff equations, BDF methods do well with large stiff equations - -While there are always exceptions to the rule, those are good guiding principles. Based on those, a simple way to choose methods is: - -- The default is `Tsit5()`, a non-stiff Runge-Kutta method of Order 5 -- If you use low tolerances (`1e-8`), try `Vern7()` or `Vern9()` -- If you use high tolerances, try `BS3()` -- If the problem is stiff, try `Rosenbrock23()`, `Rodas5()`, or `CVODE_BDF()` -- If you don't know, use `AutoTsit5(Rosenbrock23())` or `AutoVern9(Rodas5())`. - -(This is a simplified version of the default algorithm chooser) - -## Comparison to other Software - -If you are familiar with MATLAB, SciPy, or R's DESolve, here's a quick translation start to have transfer your knowledge over. 
- -- `ode23` -> `BS3()` -- `ode45`/`dopri5` -> `DP5()`, though in most cases `Tsit5()` is more efficient -- `ode23s` -> `Rosenbrock23()`, though in most cases `Rodas4()` is more efficient -- `ode113` -> `VCABM()`, though in many cases `Vern7()` is more efficient -- `dop853` -> `DP8()`, though in most cases `Vern7()` is more efficient -- `ode15s`/`vode` -> `QNDF()`, though in many cases `CVODE_BDF()`, `Rodas4()` - or `radau()` are more efficient -- `ode23t` -> `Trapezoid()` for efficiency and `GenericTrapezoid()` for robustness -- `ode23tb` -> `TRBDF2` -- `lsoda` -> `lsoda()` (requires `]add LSODA; using LSODA`) -- `ode15i` -> `IDA()`, though in many cases `Rodas4()` can handle the DAE and is - significantly more efficient - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/formatting_plots.jmd b/tutorials/introduction/formatting_plots.jmd deleted file mode 100644 index bff68849..00000000 --- a/tutorials/introduction/formatting_plots.jmd +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Formatting Plots -author: Chris Rackauckas ---- - -Since the plotting functionality is implemented as a recipe to Plots.jl, [all of the options open to Plots.jl can be used in our plots](https://juliaplots.github.io/supported/). In addition, there are special features specifically for [differential equation plots](http://docs.juliadiffeq.org/latest/basics/plot.html). This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. 
As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling `solve` on the problem, and `plot` on the solution: - -```julia -using DifferentialEquations, Plots, ParameterizedFunctions -gr() -lorenz = @ode_def Lorenz begin - dx = σ*(y-x) - dy = ρ*x-y-x*z - dz = x*y-β*z -end σ β ρ - -p = [10.0,8/3,28] -u0 = [1., 5., 10.] -tspan = (0., 100.) -prob = ODEProblem(lorenz, u0, tspan, p) -sol = solve(prob) -``` - -```julia -plot(sol) -``` - -Now let's change it to a phase plot. As discussed in the [plot functions page](http://docs.juliadiffeq.org/latest/basics/plot.html), we can use the `vars` command to choose the variables to plot. Let's plot variable `x` vs variable `y` vs variable `z`: - -```julia -plot(sol,vars=(:x,:y,:z)) -``` - -We can also choose to plot the timeseries for a single variable: - -```julia -plot(sol,vars=[:x]) -``` - -Notice that we were able to use the variable names because we had defined the problem with the macro. But in general, we can use the indices. The previous plots would be: - -```julia -plot(sol,vars=(1,2,3)) -plot(sol,vars=[1]) -``` - -Common options are to add titles, axis, and labels. For example: - -```julia -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", -xaxis="Time (t)",yaxis="u(t) (in mm)",label=["X","Y","Z"]) -``` - -Notice that series recipes apply to the solution type as well. For example, we can use a scatter plot on the timeseries: - -```julia -scatter(sol,vars=[:x]) -``` - -This shows that the recipe is using the interpolation to smooth the plot. It becomes abundantly clear when we turn it off using `denseplot=false`: - -```julia -plot(sol,vars=(1,2,3),denseplot=false) -``` - -When this is done, only the values the timestep hits are plotted. Using the interpolation usually results in a much nicer looking plot so it's recommended, and since the interpolations have similar orders to the numerical methods, their results are trustworthy on the full interval. 
We can control the number of points used in the interpolation's plot using the `plotdensity` command: - -```julia -plot(sol,vars=(1,2,3),plotdensity=100) -``` - -That's plotting the entire solution using 100 points spaced evenly in time. - -```julia -plot(sol,vars=(1,2,3),plotdensity=10000) -``` - -That's more like it! By default it uses `100*length(sol)`, where the length is the number of internal steps it had to take. This heuristic usually does well, but unusually difficult equations it can be relaxed (since it will take small steps), and for equations with events / discontinuities raising the plot density can help resolve the discontinuity. - -Lastly notice that we can compose plots. Let's show where the 100 points are using a scatter plot: - -```julia -plot(sol,vars=(1,2,3)) -scatter!(sol,vars=(1,2,3),plotdensity=100) -``` - -We can instead work with an explicit plot object. This form can be better for building a complex plot in a loop. - -```julia -p = plot(sol,vars=(1,2,3)) -scatter!(p,sol,vars=(1,2,3),plotdensity=100) -title!("I added a title") -``` - -You can do all sorts of things. Have fun! - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/ode_introduction.jmd b/tutorials/introduction/ode_introduction.jmd deleted file mode 100644 index b3894846..00000000 --- a/tutorials/introduction/ode_introduction.jmd +++ /dev/null @@ -1,395 +0,0 @@ ---- -title: An Intro to DifferentialEquations.jl -author: Chris Rackauckas ---- - -## Basic Introduction Via Ordinary Differential Equations - -This notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the [ODE tutorial](http://docs.juliadiffeq.org/latest/tutorials/ode_example.html). 
While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned. - -### Background - -If you are new to the study of differential equations, it can be helpful to do a quick background read on [the definition of ordinary differential equations](https://en.wikipedia.org/wiki/Ordinary_differential_equation). We define an ordinary differential equation as an equation which describes the way that a variable $u$ changes, that is - -$$u' = f(u,p,t)$$ - -where $p$ are the parameters of the model, $t$ is the time variable, and $f$ is the nonlinear model of how $u$ changes. The initial value problem also includes the information about the starting value: - -$$u(t_0) = u_0$$ - -Together, if you know the starting value and you know how the value will change with time, then you know what the value will be at any time point in the future. This is the intuitive definition of a differential equation. - -### First Model: Exponential Growth - -Our first model will be the canonical exponential growth model. This model says that the rate of change is proportional to the current value, and is this: - -$$u' = au$$ - -where we have a starting value $u(0)=u_0$. Let's say we put 1 dollar into Bitcoin which is increasing at a rate of $98\%$ per year. Then calling now $t=0$ and measuring time in years, our model is: - -$$u' = 0.98u$$ - -and $u(0) = 1.0$. We encode this into Julia by noticing that, in this setup, we match the general form when - -```julia -f(u,p,t) = 0.98u -``` - -with $ u_0 = 1.0 $. 
If we want to solve this model on a time span from `t=0.0` to `t=1.0`, then we define an `ODEProblem` by specifying this function `f`, this initial condition `u0`, and this time span as follows: - -```julia -using DifferentialEquations -f(u,p,t) = 0.98u -u0 = 1.0 -tspan = (0.0,1.0) -prob = ODEProblem(f,u0,tspan) -``` - -To solve our `ODEProblem` we use the command `solve`. - -```julia -sol = solve(prob) -``` - -and that's it: we have succesfully solved our first ODE! - -#### Analyzing the Solution - -Of course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the [Solution Handling](http://docs.juliadiffeq.org/latest/basics/solution.html) page. Here we will describe some of the basics. You can plot the solution using the plot recipe provided by [Plots.jl](http://docs.juliaplots.org/latest/): - -```julia -using Plots; gr() -plot(sol) -``` - -From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the [Plots.jl attributes](http://docs.juliaplots.org/latest/attributes/). For example: - -```julia -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", - xaxis="Time (t)",yaxis="u(t) (in μm)",label="My Thick Line!") # legend=false -``` - -Using the mutating `plot!` command we can add other pieces to our plot. For this ODE we know that the true solution is $u(t) = u_0 exp(at)$, so let's add some of the true solution to our plot: - -```julia -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") -``` - -In the previous command I demonstrated `sol.t`, which grabs the array of time points that the solution was saved at: - -```julia -sol.t -``` - -We can get the array of solution values using `sol.u`: - -```julia -sol.u -``` - -`sol.u[i]` is the value of the solution at time `sol.t[i]`. 
We can compute arrays of functions of the solution values using standard comprehensions, like: - -```julia -[t+u for (u,t) in tuples(sol)] -``` - -However, one interesting feature is that, by default, the solution is a continuous function. If we check the print out again: - -```julia -sol -``` - -you see that it says that the solution has a order changing interpolation. The default algorithm automatically switches between methods in order to handle all types of problems. For non-stiff equations (like the one we are solving), it is a continuous function of 4th order accuracy. We can call the solution as a function of time `sol(t)`. For example, to get the value at `t=0.45`, we can use the command: - -```julia -sol(0.45) -``` - -#### Controlling the Solver - -DifferentialEquations.jl has a common set of solver controls among its algorithms which can be found [at the Common Solver Options](http://docs.juliadiffeq.org/latest/basics/common_solver_opts.html) page. We will detail some of the most widely used options. - -The most useful options are the tolerances `abstol` and `reltol`. These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, `reltol` is the relative accuracy while `abstol` is the accuracy when `u` is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. Thus for the defaults `abstol=1e-6` and `reltol=1e-3`, you can expect a global accuracy of about 1-2 digits. 
If we want to get around 6 digits of accuracy, we can use the commands: - -```julia -sol = solve(prob,abstol=1e-8,reltol=1e-8) -``` - -Now we can see no visible difference against the true solution: - - -```julia -plot(sol) -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") -``` - -Notice that by decreasing the tolerance, the number of steps the solver had to take was `9` instead of the previous `5`. There is a trade off between accuracy and speed, and it is up to you to determine what is the right balance for your problem. - -Another common option is to use `saveat` to make the solver save at specific time points. For example, if we want the solution at an even grid of `t=0.1k` for integers `k`, we would use the command: - -```julia -sol = solve(prob,saveat=0.1) -``` - -Notice that when `saveat` is used the continuous output variables are no longer saved and thus `sol(t)`, the interpolation, is only first order. We can save at an uneven grid of points by passing a collection of values to `saveat`. For example: - -```julia -sol = solve(prob,saveat=[0.2,0.7,0.9]) -``` - -If we need to reduce the amount of saving, we can also turn off the continuous output directly via `dense=false`: - -```julia -sol = solve(prob,dense=false) -``` - -and to turn off all intermediate saving we can use `save_everystep=false`: - -```julia -sol = solve(prob,save_everystep=false) -``` - -If we want to solve and only save the final value, we can even set `save_start=false`. - -```julia -sol = solve(prob,save_everystep=false,save_start = false) -``` - -Note that similarly on the other side there is `save_end=false`. - -More advanced saving behaviors, such as saving functionals of the solution, are handled via the `SavingCallback` in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html#SavingCallback-1) which will be addressed later in the tutorial. 
- -#### Choosing Solver Algorithms - -There is no best algorithm for numerically solving a differential equation. When you call `solve(prob)`, DifferentialEquations.jl makes a guess at a good algorithm for your problem, given the properties that you ask for (the tolerances, the saving information, etc.). However, in many cases you may want more direct control. A later notebook will help introduce the various *algorithms* in DifferentialEquations.jl, but for now let's introduce the *syntax*. - -The most crucial determining factor in choosing a numerical method is the stiffness of the model. Stiffness is roughly characterized by a Jacobian `f` with large eigenvalues. That's quite mathematical, and we can think of it more intuitively: if you have big numbers in `f` (like parameters of order `1e5`), then it's probably stiff. Or, as the creator of the MATLAB ODE Suite, Lawrence Shampine, likes to define it, if the standard algorithms are slow, then it's stiff. We will go into more depth about diagnosing stiffness in a later tutorial, but for now note that if you believe your model may be stiff, you can hint this to the algorithm chooser via `alg_hints = [:stiff]`. - -```julia -sol = solve(prob,alg_hints=[:stiff]) -``` - -Stiff algorithms have to solve implicit equations and linear systems at each step so they should only be used when required. - -If we want to choose an algorithm directly, you can pass the algorithm type after the problem as `solve(prob,alg)`. For example, let's solve this problem using the `Tsit5()` algorithm, and just for show let's change the relative tolerance to `1e-6` at the same time: - -```julia -sol = solve(prob,Tsit5(),reltol=1e-6) -``` - -### Systems of ODEs: The Lorenz Equation - -Now let's move to a system of ODEs. The [Lorenz equation](https://en.wikipedia.org/wiki/Lorenz_system) is the famous "butterfly attractor" that spawned chaos theory. 
It is defined by the system of ODEs: - -$$ -\begin{align} -\frac{dx}{dt} &= \sigma (y - x)\\ -\frac{dy}{dt} &= x (\rho - z) -y\\ -\frac{dz}{dt} &= xy - \beta z -\end{align} -$$ - -To define a system of differential equations in DifferentialEquations.jl, we define our `f` as a vector function with a vector initial condition. Thus, for the vector `u = [x,y,z]'`, we have the derivative function: - -```julia -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -``` - -Notice here we used the in-place format which writes the output to the preallocated vector `du`. For systems of equations the in-place format is faster. We use the initial condition $u_0 = [1.0,0.0,0.0]$ as follows: - -```julia -u0 = [1.0,0.0,0.0] -``` - -Lastly, for this model we made use of the parameters `p`. We need to set this value in the `ODEProblem` as well. For our model we want to solve using the parameters $\sigma = 10$, $\rho = 28$, and $\beta = 8/3$, and thus we build the parameter collection: - -```julia -p = (10,28,8/3) # we could also make this an array, or any other type! -``` - -Now we generate the `ODEProblem` type. In this case, since we have parameters, we add the parameter values to the end of the constructor call. Let's solve this on a time span of `t=0` to `t=100`: - -```julia -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) -``` - -Now, just as before, we solve the problem: - -```julia -sol = solve(prob) -``` - -The same solution handling features apply to this case. Thus `sol.t` stores the time points and `sol.u` is an array storing the solution at the corresponding time points. - -However, there are a few extra features which are good to know when dealing with systems of equations. First of all, `sol` also acts like an array. `sol[i]` returns the solution at the `i`th time point. 
- -```julia -sol.t[10],sol[10] -``` - -Additionally, the solution acts like a matrix where `sol[j,i]` is the value of the `j`th variable at time `i`: - -```julia -sol[2,10] -``` - -We can get a real matrix by performing a conversion: - -```julia -A = Array(sol) -``` - -This is the same as sol, i.e. `sol[i,j] = A[i,j]`, but now it's a true matrix. Plotting will by default show the time series for each variable: - -```julia -plot(sol) -``` - -If we instead want to plot values against each other, we can use the `vars` command. Let's plot variable `1` against variable `2` against variable `3`: - -```julia -plot(sol,vars=(1,2,3)) -``` - -This is the classic Lorenz attractor plot, where the `x` axis is `u[1]`, the `y` axis is `u[2]`, and the `z` axis is `u[3]`. Note that the plot recipe by default uses the interpolation, but we can turn this off: - -```julia -plot(sol,vars=(1,2,3),denseplot=false) -``` - -Yikes! This shows how calculating the continuous solution has saved a lot of computational effort by computing only a sparse solution and filling in the values! Note that in vars, `0=time`, and thus we can plot the time series of a single component like: - -```julia -plot(sol,vars=(0,2)) -``` - -### A DSL for Parameterized Functions - -In many cases you may be defining a lot of functions with parameters. There exists the domain-specific language (DSL) defined by the `@ode_def` macro for helping with this common problem. For example, we can define the Lotka-Volterra equation: - -$$ -\begin{align} -\frac{dx}{dt} &= ax - bxy\\ -\frac{dy}{dt} &= -cy + dxy -\end{align} -$$ - -as follows: - -```julia -function lotka_volterra!(du,u,p,t) - du[1] = p[1]*u[1] - p[2]*u[1]*u[2] - du[2] = -p[3]*u[2] + p[4]*u[1]*u[2] -end -``` - -However, that can be hard to follow since there's a lot of "programming" getting in the way. Instead, you can use the `@ode_def` macro from ParameterizedFunctions.jl: - -```julia -using ParameterizedFunctions -lv! 
= @ode_def LotkaVolterra begin - dx = a*x - b*x*y - dy = -c*y + d*x*y -end a b c d -``` - -We can then use the result just like an ODE function from before: - -```julia -u0 = [1.0,1.0] -p = (1.5,1.0,3.0,1.0) -tspan = (0.0,10.0) -prob = ODEProblem(lv!,u0,tspan,p) -sol = solve(prob) -plot(sol) -``` - -Not only is the DSL convenient syntax, but it does some magic behind the scenes. For example, further parts of the tutorial will describe how solvers for stiff differential equations have to make use of the Jacobian in calculations. Here, the DSL uses symbolic differentiation to automatically derive that function: - -```julia -lv!.Jex -``` - -The DSL can derive many other functions; this ability is used to speed up the solvers. An extension to DifferentialEquations.jl, [Latexify.jl](https://korsbo.github.io/Latexify.jl/latest/tutorials/parameterizedfunctions.html), allows you to extract these pieces as LaTeX expressions. - -## Internal Types - -The last basic user-interface feature to explore is the choice of types. DifferentialEquations.jl respects your input types to determine the internal types that are used. Thus since in the previous cases, when we used `Float64` values for the initial condition, this meant that the internal values would be solved using `Float64`. We made sure that time was specified via `Float64` values, meaning that time steps would utilize 64-bit floats as well. But, by simply changing these types we can change what is used internally. - -As a quick example, let's say we want to solve an ODE defined by a matrix. To do this, we can simply use a matrix as input. - -```julia -A = [1. 0 0 -5 - 4 -2 4 -3 - -4 0 0 1 - 5 -2 2 3] -u0 = rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) -``` - -There is no real difference from what we did before, but now in this case `u0` is a `4x2` matrix. 
Because of that, the solution at each time point is matrix: - -```julia -sol[3] -``` - -In DifferentialEquations.jl, you can use any type that defines `+`, `-`, `*`, `/`, and has an appropriate `norm`. For example, if we want arbitrary precision floating point numbers, we can change the input to be a matrix of `BigFloat`: - -```julia -big_u0 = big.(u0) -``` - -and we can solve the `ODEProblem` with arbitrary precision numbers by using that initial condition: - -```julia -prob = ODEProblem(f,big_u0,tspan) -sol = solve(prob) -``` - -```julia -sol[1,3] -``` - -To really make use of this, we would want to change `abstol` and `reltol` to be small! Notice that the type for "time" is different than the type for the dependent variables, and this can be used to optimize the algorithm via keeping multiple precisions. We can convert time to be arbitrary precision as well by defining our time span with `BigFloat` variables: - -```julia -prob = ODEProblem(f,big_u0,big.(tspan)) -sol = solve(prob) -``` - -Let's end by showing a more complicated use of types. For small arrays, it's usually faster to do operations on static arrays via the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). The syntax is similar to that of normal arrays, but for these special arrays we utilize the `@SMatrix` macro to indicate we want to create a static array. - -```julia -using StaticArrays -A = @SMatrix [ 1.0 0.0 0.0 -5.0 - 4.0 -2.0 4.0 -3.0 - -4.0 0.0 0.0 1.0 - 5.0 -2.0 2.0 3.0] -u0 = @SMatrix rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) -``` - -```julia -sol[3] -``` - -## Conclusion - -These are the basic controls in DifferentialEquations.jl. All equations are defined via a problem type, and the `solve` command is used with an algorithm choice (or the default) to get a solution. 
Every solution acts the same, like an array `sol[i]` with `sol.t[i]`, and also like a continuous function `sol(t)` with a nice plot command `plot(sol)`. The Common Solver Options can be used to control the solver for any equation type. Lastly, the types used in the numerical solving are determined by the input types, and this can be used to solve with arbitrary precision and add additional optimizations (this can be used to solve via GPUs for example!). While this was shown on ODEs, these techniques generalize to other types of equations as well. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/optimizing_diffeq_code.jmd b/tutorials/introduction/optimizing_diffeq_code.jmd deleted file mode 100644 index ddd109db..00000000 --- a/tutorials/introduction/optimizing_diffeq_code.jmd +++ /dev/null @@ -1,492 +0,0 @@ ---- -title: Optimizing DiffEq Code -author: Chris Rackauckas ---- - -In this notebook we will walk through some of the main tools for optimizing your code in order to efficiently solve DifferentialEquations.jl. User-side optimizations are important because, for sufficiently difficult problems, most of the time will be spent inside of your `f` function, the function you are trying to solve. "Efficient" integrators are those that reduce the required number of `f` calls to hit the error tolerance. The main ideas for optimizing your DiffEq code, or any Julia function, are the following: - -- Make it non-allocating -- Use StaticArrays for small arrays -- Use broadcast fusion -- Make it type-stable -- Reduce redundant calculations -- Make use of BLAS calls -- Optimize algorithm choice - -We'll discuss these strategies in the context of small and large systems. Let's start with small systems. - -## Optimizing Small Systems (<100 DEs) - -Let's take the classic Lorenz system from before. 
Let's start by naively writing the system in its out-of-place form: - -```julia -function lorenz(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - [dx,dy,dz] -end -``` - -Here, `lorenz` returns an object, `[dx,dy,dz]`, which is created within the body of `lorenz`. - -This is a common code pattern from high-level languages like MATLAB, SciPy, or R's deSolve. However, the issue with this form is that it allocates a vector, `[dx,dy,dz]`, at each step. Let's benchmark the solution process with this choice of function: - -```julia -using DifferentialEquations, BenchmarkTools -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -The BenchmarkTools package's `@benchmark` runs the code multiple times to get an accurate measurement. The minimum time is the time it takes when your OS and other background processes aren't getting in the way. Notice that in this case it takes about 5ms to solve and allocates around 11.11 MiB. However, if we were to use this inside of a real user code we'd see a lot of time spent doing garbage collection (GC) to clean up all of the arrays we made. Even if we turn off saving we have these allocations. - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -The problem of course is that arrays are created every time our derivative function is called. This function is called multiple times per step and is thus the main source of memory usage. To fix this, we can use the in-place form to ***make our code non-allocating***: - -```julia -function lorenz!(du,u,p,t) - du[1] = 10.0*(u[2]-u[1]) - du[2] = u[1]*(28.0-u[3]) - u[2] - du[3] = u[1]*u[2] - (8/3)*u[3] -end -``` - -Here, instead of creating an array each time, we utilized the cache array `du`. When the inplace form is used, DifferentialEquations.jl takes a different internal route that minimizes the internal allocations as well. 
When we benchmark this function, we will see quite a difference. - -```julia -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -There is a 4x time difference just from that change! Notice there are still some allocations and this is due to the construction of the integration cache. But this doesn't scale with the problem size: - -```julia -tspan = (0.0,500.0) # 5x longer than before -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -since that's all just setup allocations. - -#### But if the system is small we can optimize even more. - -Allocations are only expensive if they are "heap allocations". For a more in-depth definition of heap allocations, [there are a lot of sources online](http://net-informations.com/faq/net/stack-heap.htm). But a good working definition is that heap allocations are variable-sized slabs of memory which have to be pointed to, and this pointer indirection costs time. Additionally, the heap has to be managed and the garbage controllers has to actively keep track of what's on the heap. - -However, there's an alternative to heap allocations, known as stack allocations. The stack is statically-sized (known at compile time) and thus its accesses are quick. Additionally, the exact block of memory is known in advance by the compiler, and thus re-using the memory is cheap. This means that allocating on the stack has essentially no cost! - -Arrays have to be heap allocated because their size (and thus the amount of memory they take up) is determined at runtime. But there are structures in Julia which are stack-allocated. `struct`s for example are stack-allocated "value-type"s. `Tuple`s are a stack-allocated collection. 
The most useful data structure for DiffEq though is the `StaticArray` from the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). These arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example: - -```julia -using StaticArrays -A = @SVector [2.0,3.0,5.0] -``` - -Notice that the `3` after `SVector` gives the size of the `SVector`. It cannot be changed. Additionally, `SVector`s are immutable, so we have to create a new `SVector` to change values. But remember, we don't have to worry about allocations because this data structure is stack-allocated. `SArray`s have a lot of extra optimizations as well: they have fast matrix multiplication, fast QR factorizations, etc. which directly make use of the information about the size of the array. Thus, when possible they should be used. - -Unfortunately static arrays can only be used for sufficiently small arrays. After a certain size, they are forced to heap allocate after some instructions and their compile time balloons. Thus static arrays shouldn't be used if your system has more than 100 variables. Additionally, only the native Julia algorithms can fully utilize static arrays. - -Let's ***optimize `lorenz` using static arrays***. Note that in this case, we want to use the out-of-place allocating form, but this time we want to output a static array: - -```julia -function lorenz_static(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - @SVector [dx,dy,dz] -end -``` - -To make the solver internally use static arrays, we simply give it a static array as the initial condition: - -```julia -u0 = @SVector [1.0,0.0,0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz_static,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -And that's pretty much all there is to it. 
With static arrays you don't have to worry about allocating, so use operations like `*` and don't worry about fusing operations (discussed in the next section). Do "the vectorized code" of R/MATLAB/Python and your code in this case will be fast, or directly use the numbers/values. - -#### Exercise 1 - -Implement the out-of-place array, in-place array, and out-of-place static array forms for the [Henon-Heiles System](https://en.wikipedia.org/wiki/H%C3%A9non%E2%80%93Heiles_system) and time the results. - -## Optimizing Large Systems - -### Interlude: Managing Allocations with Broadcast Fusion - -When your system is sufficiently large, or you have to make use of a non-native Julia algorithm, you have to make use of `Array`s. In order to use arrays in the most efficient manner, you need to be careful about temporary allocations. Vectorized calculations naturally have plenty of temporary array allocations. This is because a vectorized calculation outputs a vector. Thus: - -```julia -A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000) -test(A,B,C) = A + B + C -@benchmark test(A,B,C) -``` -That expression `A + B + C` creates 2 arrays. It first creates one for the output of `A + B`, then uses that result array to `+ C` to get the final result. 2 arrays! We don't want that! The first thing to do to fix this is to use broadcast fusion. [Broadcast fusion](https://julialang.org/blog/2017/01/moredots) puts expressions together. For example, instead of doing the `+` operations separately, if we were to add them all at the same time, then we would only have a single array that's created. For example: - -```julia -test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C) -@benchmark test2(A,B,C) -``` - -Puts the whole expression into a single function call, and thus only one array is required to store output. 
This is the same as writing the loop: - -```julia -function test3(A,B,C) - D = similar(A) - @inbounds for i in eachindex(A) - D[i] = A[i] + B[i] + C[i] - end - D -end -@benchmark test3(A,B,C) -``` - -However, Julia's broadcast is syntactic sugar for this. If multiple expressions have a `.`, then it will put those vectorized operations together. Thus: - -```julia -test4(A,B,C) = A .+ B .+ C -@benchmark test4(A,B,C) -``` - -is a version with only 1 array created (the output). Note that `.`s can be used with function calls as well: - -```julia -sin.(A) .+ sin.(B) -``` - -Also, the `@.` macro applys a dot to every operator: - -```julia -test5(A,B,C) = @. A + B + C #only one array allocated -@benchmark test5(A,B,C) -``` - -Using these tools we can get rid of our intermediate array allocations for many vectorized function calls. But we are still allocating the output array. To get rid of that allocation, we can instead use mutation. Mutating broadcast is done via `.=`. For example, if we pre-allocate the output: - -```julia -D = zeros(1000,1000); -``` - -Then we can keep re-using this cache for subsequent calculations. The mutating broadcasting form is: - -```julia -test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated -@benchmark test6!(D,A,B,C) -``` - -If we use `@.` before the `=`, then it will turn it into `.=`: - -```julia -test7!(D,A,B,C) = @. D = A + B + C #only one array allocated -@benchmark test7!(D,A,B,C) -``` - -Notice that in this case, there is no "output", and instead the values inside of `D` are what are changed (like with the DiffEq inplace function). Many Julia functions have a mutating form which is denoted with a `!`. For example, the mutating form of the `map` is `map!`: - -```julia -test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C) -@benchmark test8!(D,A,B,C) -``` - -Some operations require using an alternate mutating form in order to be fast. 
For example, matrix multiplication via `*` allocates a temporary: - -```julia -@benchmark A*B -``` - -Instead, we can use the mutating form `mul!` into a cache array to avoid allocating the output: - -```julia -using LinearAlgebra -@benchmark mul!(D,A,B) # same as D = A * B -``` - -For repeated calculations this reduced allocation can stop GC cycles and thus lead to more efficient code. Additionally, ***we can fuse together higher level linear algebra operations using BLAS***. The package [SugarBLAS.jl](https://github.com/lopezm94/SugarBLAS.jl) makes it easy to write higher level operations like `alpha*B*A + beta*C` as mutating BLAS calls. - -### Example Optimization: Gierer-Meinhardt Reaction-Diffusion PDE Discretization - -Let's optimize the solution of a Reaction-Diffusion PDE's discretization. In its discretized form, this is the ODE: - -$$ -\begin{align} -du &= D_1 (A_y u + u A_x) + \frac{au^2}{v} + \bar{u} - \alpha u\\ -dv &= D_2 (A_y v + v A_x) + a u^2 + \beta v -\end{align} -$$ - -where $u$, $v$, and $A$ are matrices. Here, we will use the simplified version where $A$ is the tridiagonal stencil $[1,-2,1]$, i.e. it's the 2D discretization of the LaPlacian. 
The native code would be something along the lines of: - -```julia -# Generate the constants -p = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2 -N = 100 -Ax = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1])) -Ay = copy(Ax) -Ax[2,1] = 2.0 -Ax[end-1,end] = 2.0 -Ay[1,2] = 2.0 -Ay[end,end-1] = 2.0 - -function basic_version!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = r[:,:,1] - v = r[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u - dr[:,:,2] = Dv .+ a.*u.*u .- β*v -end - -a,α,ubar,β,D1,D2 = p -uss = (ubar+β)/α -vss = (a/β)*uss^2 -r0 = zeros(100,100,2) -r0[:,:,1] .= uss.+0.1.*rand.() -r0[:,:,2] .= vss - -prob = ODEProblem(basic_version!,r0,(0.0,0.1),p) -``` - -In this version we have encoded our initial condition to be a 3-dimensional array, with `u[:,:,1]` being the `A` part and `u[:,:,2]` being the `B` part. - -```julia -@benchmark solve(prob,Tsit5()) -``` - -While this version isn't very efficient, - -#### We recommend writing the "high-level" code first, and iteratively optimizing it! - -The first thing that we can do is get rid of the slicing allocations. The operation `r[:,:,1]` creates a temporary array instead of a "view", i.e. a pointer to the already existing memory. To make it a view, add `@view`. Note that we have to be careful with views because they point to the same memory, and thus changing a view changes the original values: - -```julia -A = rand(4) -@show A -B = @view A[1:3] -B[2] = 2 -@show A -``` - -Notice that changing `B` changed `A`. This is something to be careful of, but at the same time we want to use this since we want to modify the output `dr`. Additionally, the last statement is a purely element-wise operation, and thus we can make use of broadcast fusion there. 
Let's rewrite `basic_version!` to ***avoid slicing allocations*** and to ***use broadcast fusion***: - -```julia -function gm2!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - @. du = Du + a.*u.*u./v + ubar - α*u - @. dv = Dv + a.*u.*u - β*v -end -prob = ODEProblem(gm2!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -Now, most of the allocations are taking place in `Du = D1*(Ay*u + u*Ax)` since those operations are vectorized and not mutating. We should instead replace the matrix multiplications with `mul!`. When doing so, we will need to have cache variables to write into. This looks like: - -```julia -Ayu = zeros(N,N) -uAx = zeros(N,N) -Du = zeros(N,N) -Ayv = zeros(N,N) -vAx = zeros(N,N) -Dv = zeros(N,N) -function gm3!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm3!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -But our temporary variables are global variables. We need to either declare the caches as `const` or localize them. We can localize them by adding them to the parameters, `p`. It's easier for the compiler to reason about local variables than global variables. ***Localizing variables helps to ensure type stability***. - -```julia -p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2 -function gm4!(dr,r,p,t) - a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. 
dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm4!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -We could then use the BLAS `gemmv` to optimize the matrix multiplications some more, but instead let's devectorize the stencil. - -```julia -p = (1.0,1.0,1.0,10.0,0.001,100.0,N) -function fast_gm!(du,u,p,t) - a,α,ubar,β,D1,D2,N = p - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for j in 2:N-1 - i = 1 - du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = 1 - du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for i in 2:N-1 - j = 1 - du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = 1 - du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds begin - i = 1; j = 1 - du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - 
α*u[i,j,1] - du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = 1; j = N - du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = 1 - du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = N - du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end -end -prob = ODEProblem(fast_gm!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -Lastly, we can do other things like multithread the main loops, but these optimizations get the last 2x-3x out. The main optimizations which apply everywhere are the ones we just performed (though the last one only works if your matrix is a stencil. This is known as a matrix-free implementation of the PDE discretization). - -This gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code! - -The last thing to do is then ***optimize our algorithm choice***. We have been using `Tsit5()` as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use `CVODE_BDF()`. However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. 
`CVODE_BDF` allows us to use a sparse Newton-Krylov solver by setting `linear_solver = :GMRES` (see [the solver documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1)), and thus we can solve this problem efficiently.
For large systems, this is done via in-place operations and cache arrays. Either way, the resulting solution can be immensely sped up over vectorized formulations by using these principles. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/classical_physics.jmd b/tutorials/models/classical_physics.jmd deleted file mode 100644 index ea3842b7..00000000 --- a/tutorials/models/classical_physics.jmd +++ /dev/null @@ -1,342 +0,0 @@ ---- -title: Classical Physics Models -author: Yingbo Ma, Chris Rackauckas ---- - -If you're getting some cold feet to jump in to DiffEq land, here are some handcrafted differential equations mini problems to hold your hand along the beginning of your journey. - -## Radioactive Decay of Carbon-14 - -#### First order linear ODE - -$$f(t,u) = \frac{du}{dt}$$ - -The Radioactive decay problem is the first order linear ODE problem of an exponential with a negative coefficient, which represents the half-life of the process in question. Should the coefficient be positive, this would represent a population growth equation. - -```julia -using OrdinaryDiffEq, Plots -gr() - -#Half-life of Carbon-14 is 5,730 years. -C₁ = 5.730 - -#Setup -u₀ = 1.0 -tspan = (0.0, 1.0) - -#Define the problem -radioactivedecay(u,p,t) = -C₁*u - -#Pass to solver -prob = ODEProblem(radioactivedecay,u₀,tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Carbon-14 half-life", xaxis = "Time in thousands of years", yaxis = "Percentage left", label = "Numerical Solution") -plot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label="Analytical Solution") -``` - -## Simple Pendulum - -#### Second Order Linear ODE - -We will start by solving the pendulum problem. In the physics class, we often solve this problem by small angle approximation, i.e. 
$ sin(\theta) \approx \theta$, because otherwise, we get an elliptic integral which doesn't have an analytic solution. The linearized form is - -$$\ddot{\theta} + \frac{g}{L}{\theta} = 0$$ - -But we have numerical ODE solvers! Why not solve the *real* pendulum? - -$$\ddot{\theta} + \frac{g}{L}{\sin(\theta)} = 0$$ - -```julia -# Simple Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants -const g = 9.81 -L = 1.0 - -#Initial Conditions -u₀ = [0,π/2] -tspan = (0.0,6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum,u₀, tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Simple Pendulum Problem", xaxis = "Time", yaxis = "Height", label = ["Theta","dTheta"]) -``` - -So now we know that behaviour of the position versus time. However, it will be useful to us to look at the phase space of the pendulum, i.e., and representation of all possible states of the system in question (the pendulum) by looking at its velocity and position. Phase space analysis is ubiquitous in the analysis of dynamical systems, and thus we will provide a few facilities for it. - -```julia -p = plot(sol,vars = (1,2), xlims = (-9,9), title = "Phase Space Plot", xaxis = "Velocity", yaxis = "Position", leg=false) -function phase_plot(prob, u0, p, tspan=2pi) - _prob = ODEProblem(prob.f,u0,(0.0,tspan)) - sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy - plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing) -end -for i in -4pi:pi/2:4π - for j in -4pi:pi/2:4π - phase_plot(prob, [j,i], p) - end -end -plot(p,xlims = (-9,9)) -``` - -## Simple Harmonic Oscillator - -### Double Pendulum - -```julia -#Double Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants and setup -const m₁, m₂, L₁, L₂ = 1, 2, 1, 2 -initial = [0, π/3, 0, 3pi/5] -tspan = (0.,50.) 
- -#Convenience function for transforming from polar to Cartesian coordinates -function polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4)) - u = sol.t[1]:dt:sol.t[end] - - p1 = l1*map(x->x[vars[1]], sol.(u)) - p2 = l2*map(y->y[vars[2]], sol.(u)) - - x1 = l1*sin.(p1) - y1 = l1*-cos.(p1) - (u, (x1 + l2*sin.(p2), - y1 - l2*cos.(p2))) -end - -#Define the Problem -function double_pendulum(xdot,x,p,t) - xdot[1]=x[2] - xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2))) - xdot[3]=x[4] - xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2)) -end - -#Pass to Solvers -double_pendulum_problem = ODEProblem(double_pendulum, initial, tspan) -sol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05); -``` - -```julia -#Obtain coordinates in Cartesian Geometry -ts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01) -plot(ps...) -``` - -### Poincaré section - -The Poincaré section is a contour plot of a higher-dimensional phase space diagram. It helps to understand the dynamic interactions and is wonderfully pretty. - -The following equation came from [StackOverflow question](https://mathematica.stackexchange.com/questions/40122/help-to-plot-poincar%C3%A9-section-for-double-pendulum) - -$$\frac{d}{dt} - \begin{pmatrix} - \alpha \\ l_\alpha \\ \beta \\ l_\beta - \end{pmatrix}= - \begin{pmatrix} - 2\frac{l_\alpha - (1+\cos\beta)l_\beta}{3-\cos 2\beta} \\ - -2\sin\alpha - \sin(\alpha + \beta) \\ - 2\frac{-(1+\cos\beta)l_\alpha + (3+2\cos\beta)l_\beta}{3-\cos2\beta}\\ - -\sin(\alpha+\beta) - 2\sin(\beta)\frac{(l_\alpha-l_\beta)l_\beta}{3-\cos2\beta} + 2\sin(2\beta)\frac{l_\alpha^2-2(1+\cos\beta)l_\alpha l_\beta + (3+2\cos\beta)l_\beta^2}{(3-\cos2\beta)^2} - \end{pmatrix}$$ - -The Poincaré section here is the collection of $(β,l_β)$ when $α=0$ and $\frac{dα}{dt}>0$. 
- -#### Hamiltonian of a double pendulum -Now we will plot the Hamiltonian of a double pendulum - -```julia -#Constants and setup -using OrdinaryDiffEq -initial2 = [0.01, 0.005, 0.01, 0.01] -tspan2 = (0.,200.) - -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -# Construct a ContiunousCallback -condition(u,t,integrator) = u[1] -affect!(integrator) = nothing -cb = ContinuousCallback(condition,affect!,nothing, - save_positions = (true,false)) - -# Construct Problem -poincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2) -sol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - -function poincare_map(prob, u₀, p; callback=cb) - _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan) - sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - scatter!(p, sol, vars=(3,4), markersize = 2) -end -``` - -```julia -p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03)) -for i in -0.01:0.00125:0.01 - poincare_map(poincare, i, p) -end -plot(p,ylims=(-0.01,0.03)) -``` - -## Hénon-Heiles System - -The Hénon-Heiles potential occurs when non-linear motion of a star around a galactic center with the motion restricted to a plane. 
- -$$ -\begin{align} -\frac{d^2x}{dt^2}&=-\frac{\partial V}{\partial x}\\ -\frac{d^2y}{dt^2}&=-\frac{\partial V}{\partial y} -\end{align} -$$ - -where - -$$V(x,y)={\frac {1}{2}}(x^{2}+y^{2})+\lambda \left(x^{2}y-{\frac {y^{3}}{3}}\right).$$ - -We pick $\lambda=1$ in this case, so - -$$V(x,y) = \frac{1}{2}(x^2+y^2+2x^2y-\frac{2}{3}y^3).$$ - -Then the total energy of the system can be expressed by - -$$E = T+V = V(x,y)+\frac{1}{2}(\dot{x}^2+\dot{y}^2).$$ - -The total energy should conserve as this system evolves. - -```julia -using OrdinaryDiffEq, Plots - -#Setup -initial = [0.,0.1,0.5,0] -tspan = (0,100.) - -#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will -#the total energy of the system. -V(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3) -E(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2); - -#Define the function -function Hénon_Heiles(du,u,p,t) - x = u[1] - y = u[2] - dx = u[3] - dy = u[4] - du[1] = dx - du[2] = dy - du[3] = -x - 2x*y - du[4] = y^2 - y -x^2 -end - -#Pass to solvers -prob = ODEProblem(Hénon_Heiles, initial, tspan) -sol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16); -``` - -```julia -# Plot the orbit -plot(sol, vars=(1,2), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) -``` - -```julia -#Optional Sanity check - what do you think this returns and why? -@show sol.retcode - -#Plot - -plot(sol, vars=(1,3), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol, vars=(2,4), leg = false) -``` - -```julia -#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector -#pass it to the plotter a bit more conveniently -energy = map(x->E(x...), sol.u) - -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. 
-@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -### Symplectic Integration - -To prevent energy drift, we can instead use a symplectic integrator. We can directly define and solve the `SecondOrderODEProblem`: - -```julia -function HH_acceleration!(dv,v,u,p,t) - x,y = u - dx,dy = dv - dv[1] = -x - 2x*y - dv[2] = y^2 - y -x^2 -end -initial_positions = [0.0,0.1] -initial_velocities = [0.5,0.0] -prob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan) -sol2 = solve(prob, KahanLi8(), dt=1/10); -``` - -Notice that we get the same results: - -```julia -# Plot the orbit -plot(sol2, vars=(3,4), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) -``` - -```julia -plot(sol2, vars=(3,1), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol2, vars=(4,2), leg = false) -``` - -but now the energy change is essentially zero: - -```julia -energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u) -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. -@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol2.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -It's so close to zero it breaks GR! And let's try to use a Runge-Kutta-Nyström solver to solve this. Note that Runge-Kutta-Nyström isn't symplectic. - -```julia -sol3 = solve(prob, DPRKN6()); -energy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u) -@show ΔE = energy[1]-energy[end] -gr() -plot(sol3.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -Note that we are using the `DPRKN6` sovler at `reltol=1e-3` (the default), yet it has a smaller energy variation than `Vern9` at `abs_tol=1e-16, rel_tol=1e-16`. 
Therefore, using specialized solvers to solve its particular problem is very efficient. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/conditional_dosing.jmd b/tutorials/models/conditional_dosing.jmd deleted file mode 100644 index 546c5577..00000000 --- a/tutorials/models/conditional_dosing.jmd +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: Conditional Dosing Pharmacometric Example -author: Chris Rackauckas ---- - -In this example we will show how to model a conditional dosing using the `DiscreteCallbacks`. The problem is as follows. The patient has a drug `A(t)` in their system. The concentration of the drug is given as `C(t)=A(t)/V` for some volume constant `V`. At `t=4`, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below `4`, then they will receive a new dose. - -For our model, we will use the simple decay equation. We will write this in the in-place form to make it easy to extend to more complicated examples: - -```julia -using DifferentialEquations -function f(du,u,p,t) - du[1] = -u[1] -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) -``` - -Let's see what the solution looks like without any events. - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -We see that at time `t=4`, the patient should receive a dose. Let's code up that event. We need to check at `t=4` if the concentration `u[1]/4` is `<4`, and if so, add `10` to `u[1]`. We do this with the following: - -```julia -condition(u,t,integrator) = t==4 && u[1]/V<4 -affect!(integrator) = integrator.u[1] += 10 -cb = DiscreteCallback(condition,affect!) 
-``` - -Now we will give this callback to the solver, and tell it to stop at `t=4` so that way the condition can be checked: - -```julia -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) -``` - -Let's show that it actually added 10 instead of setting the value to 10. We could have set the value using `affect!(integrator) = integrator.u[1] = 10` - -```julia -println(sol(4.00000)) -println(sol(4.000000000001)) -``` - -Now let's model a patient whose decay rate for the drug is lower: - -```julia -function f(du,u,p,t) - du[1] = -u[1]/6 -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) -``` - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -Under the same criteria, with the same event, this patient will not receive a second dose: - -```julia -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/diffeqbio_II_networkproperties.jmd b/tutorials/models/diffeqbio_II_networkproperties.jmd deleted file mode 100644 index 8aa6f918..00000000 --- a/tutorials/models/diffeqbio_II_networkproperties.jmd +++ /dev/null @@ -1,488 +0,0 @@ ---- -title: "DiffEqBiological Tutorial II: Network Properties API" -author: Samuel Isaacson ---- - -The [DiffEqBiological -API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides a -collection of functions for easily accessing network properties, and for -incrementally building and extending a network. In this tutorial we'll go -through the API, and then illustrate how to programmatically construct a -network. 
- -We'll illustrate the API using a toggle-switch like network that contains a -variety of different reaction types: - -```julia -using DifferentialEquations, DiffEqBiological, Latexify, Plots -fmt = :svg -pyplot(fmt=fmt) -rn = @reaction_network begin - hillr(D₂,α,K,n), ∅ --> m₁ - hillr(D₁,α,K,n), ∅ --> m₂ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - (k₊,k₋), 2P₁ ↔ D₁ - (k₊,k₋), 2P₂ ↔ D₂ - (k₊,k₋), P₁+P₂ ↔ T -end α K n δ γ β μ k₊ k₋; -``` - -This corresponds to the chemical reaction network given by - -```julia; results="hidden"; -latexify(rn; env=:chemical) -``` -```julia; echo=false; skip="notebook"; -x = latexify(rn; env=:chemical, starred=true, mathjax=true); -display("text/latex", "$x"); -``` - ---- -## Network Properties -[Basic -properties](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Basic-properties-1) -of the generated network include the `speciesmap` and `paramsmap` functions we -examined in the last tutorial, along with the corresponding `species` and -`params` functions: - -```julia -species(rn) -``` -```julia -params(rn) -``` - -The numbers of species, parameters and reactions can be accessed using -`numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`. - -A number of functions are available to access [properties of -reactions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Properties-1) -within the generated network, including `substrates`, `products`, `dependents`, -`ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`, -`productsymstoich`, and `netstoich`. Each of these functions takes two -arguments, the reaction network `rn` and the index of the reaction to query -information about. 
For example, to find the substrate symbols and their -corresponding stoichiometries for the 11th reaction, `2P₁ --> D₁`, we would use - -```julia -substratesymstoich(rn, 11) -``` - -Broadcasting works on all these functions, allowing the construction of a vector -holding the queried information across all reactions, i.e. - -```julia -substratesymstoich.(rn, 1:numreactions(rn)) -``` - -To see the net stoichiometries for all reactions we would use - -```julia -netstoich.(rn, 1:numreactions(rn)) -``` - -Here the first integer in each pair corresponds to the index of the species -(with symbol `species(rn)[index]`). The second integer corresponds to the net -stoichiometric coefficient of the species within the reaction. `substratestoich` -and `productstoich` are defined similarly. - -Several functions are also provided that calculate different types of -[dependency -graphs](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Dependency-Graphs-1). -These include `rxtospecies_depgraph`, which provides a mapping from reaction -index to the indices of species whose population changes when the reaction -occurs: - -```julia -rxtospecies_depgraph(rn) -``` - -Here the last row indicates that the species with indices `[3,4,7]` will change -values when the reaction `T --> P₁ + P₂` occurs. To confirm these are the -correct species we can look at - -```julia -species(rn)[[3,4,7]] -``` - -The `speciestorx_depgraph` similarly provides a mapping from species to reactions -for which their *rate laws* depend on that species. These correspond to all reactions -for which the given species is in the `dependent` set of the reaction. 
We can verify this -for the first species, `m₁`: - -```julia -speciestorx_depgraph(rn)[1] -``` -```julia -findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn))) -``` - -Finally, `rxtorx_depgraph` provides a mapping that shows when a given reaction -occurs, which other reactions have rate laws that involve species whose value -would have changed: - -```julia -rxtorx_depgraph(rn) -``` - -#### Note on Using Network Property API Functions -Many basic network query and reaction property functions are simply accessors, -returning information that is already stored within the generated -`reaction_network`. For these functions, modifying the returned data structures -may lead to inconsistent internal state within the network. As such, they should -be used for accessing, but not modifying, network properties. The [API -documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) -indicates which functions return newly allocated data structures and which -return data stored within the `reaction_network`. - ---- -## Incremental Construction of Networks -The `@reaction_network` macro is monolithic, in that it not only constructs and -stores basic network properties such as the reaction stoichiometries, but also -generates **everything** needed to immediately solve ODE, SDE and jump models -using the network. This includes Jacobian functions, noise functions, and jump -functions for each reaction. While this allows for a compact interface to the -DifferentialEquations.jl solvers, it can also be computationally expensive for -large networks, where a user may only wish to solve one type of problem and/or -have fine-grained control over what is generated. In addition, some types of -reaction network structures are more amenable to being constructed -programmatically, as opposed to writing out all reactions by hand within one -macro. 
For these reasons DiffEqBiological provides two additional macros that -only *initially* setup basic reaction network properties, and which can be -extended through a programmatic interface: `@min_reaction_network` and -`@empty_reaction_network`. We now give an introduction to constructing these -more minimal network representations, and how they can be programmatically -extended. See also the relevant [API -section](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1). - -The `@min_reaction_network` macro works identically to the `@reaction_network` -macro, but the generated network will only be complete with respect to its -representation of chemical network properties (i.e. species, parameters and -reactions). No ODE, SDE or jump models are generated during the macro call. It -can subsequently be extended with the addition of new species, parameters or -reactions. The `@empty_reaction_network` allocates an empty network structure -that can also be extended using the programmatic interface. For example, consider -a partial version of the toggle-switch like network we defined above: - -```julia -rnmin = @min_reaction_network begin - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ -end δ γ β μ; -``` - -Here we have left out the first two, and last three, reactions from the original -`reaction_network`. To expand the network until it is functionally equivalent to -the original model we add back in the missing species, parameters, and *finally* -the missing reactions. Note, it is required that species and parameters be -defined before any reactions using them are added. The necessary network -extension functions are given by `addspecies!`, `addparam!` and `addreaction!`, -and described in the -[API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). 
To complete `rnmin` we first add the relevant -species: - -```julia -addspecies!(rnmin, :D₁) -addspecies!(rnmin, :D₂) -addspecies!(rnmin, :T) -``` - -Next we add the needed parameters - -```julia -addparam!(rnmin, :α) -addparam!(rnmin, :K) -addparam!(rnmin, :n) -addparam!(rnmin, :k₊) -addparam!(rnmin, :k₋) -``` - -Note, both `addspecies!` and `addparam!` also accept strings encoding the -variable names (which are then converted to `Symbol`s internally). - -We are now ready to add the missing reactions. The API provides two forms of the -`addreaction!` function, one takes expressions analogous to what one would write -in the macro: - -```julia -addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂)) -addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂)) -addreaction!(rnmin, :k₊, :(2P₁ --> D₁)) -addreaction!(rnmin, :k₋, :(D₁ --> 2P₁)) -``` - -The rate can be an expression or symbol as above, but can also just be a -numeric value. The second form of `addreaction!` takes tuples of -`Pair{Symbol,Int}` that encode the stoichiometric coefficients of substrates and -reactants: - -```julia -# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich) -addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,)) -addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,)) -addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1)) -``` - -Let's check that `rn` and `rnmin` have the same set of species: - -```julia -setdiff(species(rn), species(rnmin)) -``` - -the same set of params: - -```julia -setdiff(params(rn), params(rnmin)) -``` - -and the final reaction has the same substrates, reactions, and rate expression: - -```julia -rxidx = numreactions(rn) -setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx)) -``` -```julia -setdiff(products(rn, rxidx), products(rnmin, rxidx)) -``` -```julia -rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx) -``` - ---- -## Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps -Once a network generated from `@min_reaction_network` or 
-`@empty_reaction_network` has had all the associated species, parameters and -reactions filled in, corresponding ODE, SDE or jump models can be constructed. -The relevant API functions are `addodes!`, `addsdes!` and `addjumps!`. One -benefit to contructing models with these functions is that they offer more -fine-grained control over what actually gets constructed. For example, -`addodes!` has the optional keyword argument, `build_jac`, which if set to -`false` will disable construction of symbolic Jacobians and functions for -evaluating Jacobians. For large networks this can give a significant speed-up in -the time required for constructing an ODE model. Each function and its -associated keyword arguments are described in the API section, [Functions to add -ODEs, SDEs or Jumps to a -Network](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1). - -Let's extend `rnmin` to include the needed functions for use in ODE -solvers: - -```julia -addodes!(rnmin) -``` - -The [Generated Functions for -Models](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Functions-for-Models-1) -section of the API shows what functions have been generated. For ODEs these -include `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)` -which evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. For -each generated function, the corresponding expressions from which it was -generated can be retrieved using accessors from the [Generated -Expressions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Expressions-1) -section of the API. The equations within `du` can be retrieved using the -`odeexprs(rnmin)` function. 
For example: - -```julia -odeexprs(rnmin) -``` - -Using Latexify we can see the ODEs themselves to compare with these expressions: - -```julia; results="hidden"; -latexify(rnmin) -``` -```julia; echo=false; skip="notebook"; -x = latexify(rnmin, starred=true); -display("text/latex", "$x"); -``` - -For ODEs two other functions are generated by `addodes!`. `jacfun(rnmin)` will -return the generated Jacobian evaluation function, `fjac(dJ,u,p,t)`, which given -the current solution `u` evaluates the Jacobian within `dJ`. -`jacobianexprs(rnmin)` gives the corresponding matrix of expressions, which can -be used with Latexify to see the Jacobian: - -```julia; results="hidden"; -latexify(jacobianexprs(rnmin)) -``` -```julia; echo=false; skip="notebook"; -x = latexify(jacobianexprs(rnmin), starred=true); -display("text/latex", "$x"); -``` - -`addodes!` also generates a function that evaluates the Jacobian of the ODE -derivative functions with respect to the parameters. `paramjacfun(rnmin)` then -returns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which -given the current solution `u` evaluates the Jacobian matrix with respect to -parameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an -[`ODEFunction`](http://docs.juliadiffeq.org/latest/features/performance_overloads.html) -representation of the ODEs is available from `odefun(rnmin)`. - -`addsdes!` and `addjumps!` work similarly to complete the network for use in -StochasticDiffEq and DiffEqJump solvers. - -#### Note on Using Generated Function and Expression API Functions -The generated functions and expressions accessible through the API require first -calling the appropriate `addodes!`, `addsdes` or `addjumps` function. These are -responsible for actually constructing the underlying functions and expressions. -The API accessors simply return already constructed functions and expressions -that are stored within the `reaction_network` structure. 
- ---- -## Example of Generating a Network Programmatically -For a user directly typing in a reaction network, it is generally easier to use -the `@min_reaction_network` or `@reaction_network` macros to fully specify -reactions. However, for large, structured networks it can be much easier to -generate the network programmatically. For very large networks, with tens of -thousands of reactions, the form of `addreaction!` that uses stoichiometric -coefficients should be preferred as it offers substantially better performance. -To put together everything we've seen, let's generate the network corresponding -to a 1D continuous time random walk, approximating the diffusion of molecules -within an interval. - -The basic "reaction" network we wish to study is - -$$ -u_1 \leftrightarrows u_2 \leftrightarrows u_3 \cdots \leftrightarrows u_{N} -$$ - -for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll -assume the rate molecules hop from their current site to any particular neighbor -is just $h^{-2}$. We can interpret this hopping process as a collection of -$2N-2$ "reactions", with the form $u_i \to u_j$ for $j=i+1$ or $j=i-1$. We construct -the corresponding reaction network as follows. 
First we set values for the basic -parameters: -```julia -N = 64 -h = 1 / N -``` - -then we create an empty network, and add each species - -```julia -rn = @empty_reaction_network - -for i = 1:N - addspecies!(rn, Symbol(:u, i)) -end -``` - -We next add one parameter `β`, which we will set equal to the hopping rate -of molecules, $h^{-2}$: - -```julia -addparam!(rn, :β) -``` - -Finally, we add in the $2N-2$ possible hopping reactions: -```julia -for i = 1:N - (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,)) - (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,)) -end -``` - -Let's first construct an ODE model for the network - -```julia -addodes!(rn) -``` - -We now need to specify the initial condition, parameter vector and time interval -to solve on. We start with 10000 molecules placed at the center of the domain, -and setup an `ODEProblem` to solve: - -```julia -u₀ = zeros(N) -u₀[div(N,2)] = 10000 -p = [1/(h*h)] -tspan = (0.,.01) -oprob = ODEProblem(rn, u₀, tspan, p) -``` - -We are now ready to solve the problem and plot the solution. Since we have -essentially generated a method of lines discretization of the diffusion equation -with a discontinuous initial condition, we'll use an A-L stable implicit ODE -solver, `KenCarp4`, and plot the solution at a few times: - -```julia -sol = solve(oprob, KenCarp4()) -times = [0., .0001, .001, .01] -plt = plot() -for time in times - plot!(plt, 1:N, sol(time), fmt=fmt, xlabel="i", ylabel="uᵢ", label=string("t = ", time), lw=3) -end -plot(plt, ylims=(0.,10000.)) -``` - -Here we see the characteristic diffusion of molecules from the center of the -domain, resulting in a shortening and widening of the solution as $t$ increases. - -Let's now look at a stochastic chemical kinetics jump process version of the -model, where β gives the probability per time each molecule can hop from its -current lattice site to an individual neighboring site. 
We first add in the -jumps, disabling `regular_jumps` since they are not needed, and using the -`minimal_jumps` flag to construct a minimal representation of the needed jumps. -We then construct a `JumpProblem`, and use the Composition-Rejection Direct -method, `DirectCR`, to simulate the process of the molecules hopping about on -the lattice: - -```julia -addjumps!(rn, build_regular_jumps=false, minimal_jumps=true) - -# make the initial condition integer valued -u₀ = zeros(Int, N) -u₀[div(N,2)] = 10000 - -# setup and solve the problem -dprob = DiscreteProblem(rn, u₀, tspan, p) -jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false)) -jsol = solve(jprob, SSAStepper(), saveat=times) -``` - -We can now plot bar graphs showing the locations of the molecules at the same -set of times we examined the ODE solution. For comparison, we also plot the -corresponding ODE solutions (red lines) that we found: -```julia -times = [0., .0001, .001, .01] -plts = [] -for i = 1:4 - b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i])) - plot!(b,sol(times[i])) - push!(plts,b) -end -plot(plts...) -``` - -Similar to the ODE solutions, we see that the molecules spread out and become -more and more well-mixed throughout the domain as $t$ increases. The simulation -results are noisy due to the finite numbers of molecules present in the -stochsatic simulation, but since the number of molecules is large they agree -well with the ODE solution at each time. - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). -If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). 
If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github -site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull -request to DiffEqTutorials updating the tutorial! - ---- -```julia; echo=false; skip="notebook" -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/diffeqbio_I_introduction.jmd b/tutorials/models/diffeqbio_I_introduction.jmd deleted file mode 100644 index 7839db4d..00000000 --- a/tutorials/models/diffeqbio_I_introduction.jmd +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: "DiffEqBiological Tutorial I: Introduction" -author: Samuel Isaacson ---- - -DiffEqBiological.jl is a domain specific language (DSL) for writing chemical -reaction networks in Julia. The generated chemical reaction network model can -then be translated into a variety of mathematical models which can be solved -using components of the broader -[DifferentialEquations.jl](http://juliadiffeq.org/) ecosystem. - -In this tutorial we'll provide an introduction to using DiffEqBiological to -specify chemical reaction networks, and then to solve ODE, jump, tau-leaping and -SDE models generated from them. Let's start by using the DiffEqBiological -`reaction_network` macro to specify a simply chemical reaction network; the -well-known Repressilator. - -We first import the basic packages we'll need, and use Plots.jl for making -figures: - -```julia -# If not already installed, first hit "]" within a Julia REPL. Then type: -# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify - -using DifferentialEquations, DiffEqBiological, Plots, Latexify -pyplot(fmt=:svg); -``` - -We now construct the reaction network. 
The basic types of arrows and predefined -rate laws one can use are discussed in detail within the DiffEqBiological -[Chemical Reaction Models -documentation](http://docs.juliadiffeq.org/latest/models/biological.html). Here -we use a mix of first order, zero order and repressive Hill function rate laws. -Note, $\varnothing$ corresponds to the empty state, and is used for zeroth order -production and first order degradation reactions: - -```julia -repressilator = @reaction_network begin - hillr(P₃,α,K,n), ∅ --> m₁ - hillr(P₁,α,K,n), ∅ --> m₂ - hillr(P₂,α,K,n), ∅ --> m₃ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - (δ,γ), m₃ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - β, m₃ --> m₃ + P₃ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - μ, P₃ --> ∅ -end α K n δ γ β μ; -``` - -We can use Latexify to look at the corresponding reactions and understand the -generated rate laws for each reaction - -```julia; results="hidden"; -latexify(repressilator; env=:chemical) -``` -```julia; echo=false; skip="notebook"; -x = latexify(repressilator; env=:chemical, starred=true, mathjax=true); -display("text/latex", "$x"); -``` - -We can also use Latexify to look at the corresponding ODE model for the chemical -system - -```julia; results="hidden"; -latexify(repressilator) -``` -```julia; echo=false; skip="notebook"; -x = latexify(repressilator, starred=true); -display("text/latex", "$x"); -``` - -To solve the ODEs we need to specify the values of the parameters in the model, -the initial condition, and the time interval to solve the model on. To do this -it helps to know the orderings of the parameters and the species. Parameters are -ordered in the same order they appear after the `end` statement in the -`@reaction_network` macro. Species are ordered in the order they first appear -within the `@reaction_network` macro. 
We can see these orderings using the -`speciesmap` and `paramsmap` functions: - -```julia -speciesmap(repressilator) -``` - -```julia -paramsmap(repressilator) -``` - -## Solving the ODEs: -Knowing these orderings, we can create parameter and initial condition vectors, -and setup the `ODEProblem` we want to solve: - -```julia -# parameters [α,K,n,δ,γ,β,μ] -p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60) - -# initial condition [m₁,m₂,m₃,P₁,P₂,P₃] -u₀ = [0.,0.,0.,20.,0.,0.] - -# time interval to solve on -tspan = (0., 10000.) - -# create the ODEProblem we want to solve -oprob = ODEProblem(repressilator, u₀, tspan, p) -``` - -At this point we are all set to solve the ODEs. We can now use any ODE solver -from within the DiffEq package. We'll just use the default DifferentialEquations -solver for now, and then plot the solutions: - -```julia -sol = solve(oprob, saveat=10.) -plot(sol, fmt=:svg) -``` - -We see the well-known oscillatory behavior of the repressilator! For more on -choices of ODE solvers, see the JuliaDiffEq -[documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html). - ---- - -## Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kinetics -Let's now look at a stochastic chemical kinetics model of the repressilator, -modeling it with jump processes. Here we will construct a DiffEqJump -`JumpProblem` that uses Gillespie's `Direct` method, and then solve it to -generate one realization of the jump process: - -```julia -# first we redefine the initial condition to be integer valued -u₀ = [0,0,0,20,0,0] - -# next we create a discrete problem to encode that our species are integer valued: -dprob = DiscreteProblem(repressilator, u₀, tspan, p) - -# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver: -jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false)) - -# now let's solve and plot the jump process: -sol = solve(jprob, SSAStepper(), saveat=10.) 
-plot(sol, fmt=:svg) -``` - -Here we see that oscillations remain, but become much noiser. Note, in -constructing the `JumpProblem` we could have used any of the SSAs that are part -of DiffEqJump instead of the `Direct` method, see the list of SSAs (i.e. -constant rate jump aggregators) in the -[documentation](http://docs.juliadiffeq.org/latest/types/jump_types.html#Constant-Rate-Jump-Aggregators-1). - ---- -## $\tau$-leaping Methods: -While SSAs generate exact realizations for stochastic chemical kinetics jump -process models, [$\tau$-leaping](https://en.wikipedia.org/wiki/Tau-leaping) -methods offer a performant alternative by discretizing in time the underlying -time-change representation of the stochastic process. The DiffEqJump package has -limited support for $\tau$-leaping methods in the form of the basic Euler's -method type approximation proposed by Gillespie. We can simulate a $\tau$-leap -approximation to the repressilator by using the `RegularJump` representation of -the network to construct a `JumpProblem`: - -```julia -rjs = regularjumps(repressilator) -lprob = JumpProblem(dprob, Direct(), rjs) -lsol = solve(lprob, SimpleTauLeaping(), dt=.1) -plot(lsol, plotdensity=1000, fmt=:svg) -``` - ---- -## Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models: -At an intermediary physical scale between macroscopic ODE models and microscopic -stochastic chemical kinetic models lies the CLE, a SDE version of the model. The -SDEs add to each ODE above a noise term. As the repressilator has species that -get very close to zero in size, it is not a good candidate to model with the CLE -(where solutions can then go negative and become unphysical). Let's create a -simpler reaction network for a birth-death process that will stay non-negative: - -```julia -bdp = @reaction_network begin - c₁, X --> 2X - c₂, X --> 0 - c₃, 0 --> X -end c₁ c₂ c₃ -p = (1.0,2.0,50.) -u₀ = [5.] 
-tspan = (0.,4.); -``` - -The corresponding Chemical Langevin Equation SDE is then - -$$ -dX_t = \left(c_1 X - c_2 X + c_3 \right) dt + \left( \sqrt{c_1 X} - \sqrt{c_2 X} + \sqrt{c_3} \right)dW_t, -$$ - -where $W_t$ denotes a standard Brownian Motion. We can solve the CLE SDE model -by creating an SDEProblem and solving it similar to what we did for ODEs above: - -```julia -# SDEProblem for CLE -sprob = SDEProblem(bdp, u₀, tspan, p) - -# solve and plot, tstops is used to specify enough points -# that the plot looks well-resolved -sol = solve(sprob, tstops=range(0., step=4e-3, length=1001)) -plot(sol, fmt=:svg) -``` - -We again have complete freedom to select any of the -StochasticDifferentialEquations.jl SDE solvers, see the -[documentation](http://docs.juliadiffeq.org/latest/solvers/sde_solve.html). - ---- -## What information can be queried from the reaction_network: -The generated `reaction_network` contains a lot of basic information. For example -- `f=oderhsfun(repressilator)` is a function `f(du,u,p,t)` that given the current - state vector `u` and time `t` fills `du` with the time derivatives of `u` - (i.e. the right hand side of the ODEs). -- `jac=jacfun(repressilator)` is a function `jac(J,u,p,t)` that evaluates and - returns the Jacobian of the ODEs in `J`. A corresponding Jacobian matrix of - expressions can be accessed using the `jacobianexprs` function: -```julia; results="hidden"; -latexify(jacobianexprs(repressilator)) -``` -```julia; echo=false; skip="notebook"; -x = latexify(jacobianexprs(repressilator), starred=true); -display("text/latex", "$x"); -``` -- `pjac = paramjacfun(repressilator)` is a function `pjac(pJ,u,p,t)` that - evaluates and returns the Jacobian, `pJ`, of the ODEs *with respect to the - parameters*. This allows `reaction_network`s to be used in the - DifferentialEquations.jl local sensitivity analysis package - [DiffEqSensitivity](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html). 
- - -By default, generated `ODEProblems` will be passed the corresponding Jacobian -function, which will then be used within implicit ODE/SDE methods. - -The [DiffEqBiological API -documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides -a thorough description of the many query functions that are provided to access -network properties and generated functions. In DiffEqBiological Tutorial II -we'll explore the API. - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). -If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github -site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull -request to DiffEqTutorials updating the tutorial! 
- ---- -```julia; echo=false; skip="notebook" -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/kepler_problem.jmd b/tutorials/models/kepler_problem.jmd deleted file mode 100644 index abcbc12d..00000000 --- a/tutorials/models/kepler_problem.jmd +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: Kepler Problem -author: Yingbo Ma, Chris Rackauckas ---- - -The Hamiltonian $\mathcal {H}$ and the angular momentum $L$ for the Kepler problem are - -$$\mathcal {H} = \frac{1}{2}(\dot{q}^2_1+\dot{q}^2_2)-\frac{1}{\sqrt{q^2_1+q^2_2}},\quad -L = q_1\dot{q_2} - \dot{q_1}q_2$$ - -Also, we know that - -$${\displaystyle {\frac {\mathrm {d} {\boldsymbol {p}}}{\mathrm {d} t}}=-{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {q}}}}\quad ,\quad {\frac {\mathrm {d} {\boldsymbol {q}}}{\mathrm {d} t}}=+{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {p}}}}}$$ - -```julia -using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr() -H(q,p) = norm(p)^2/2 - inv(norm(q)) -L(q,p) = q[1]*p[2] - p[1]*q[2] - -pdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q) -qdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p) - -initial_position = [.4, 0] -initial_velocity = [0., 2.] -initial_cond = (initial_position, initial_velocity) -initial_first_integrals = (H(initial_cond...), L(initial_cond...)) -tspan = (0,20.) -prob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan) -sol = solve(prob, KahanLi6(), dt=1//10); -``` - -Let's plot the orbit and check the energy and angular momentum variation. We know that energy and angular momentum should be constant, and they are also called first integrals. 
- -```julia -plot_orbit(sol) = plot(sol,vars=(3,4), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab="Angular momentum variation") -end -analysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L)) -``` - -```julia -analysis_plot(sol, H, L) -``` - -Let's try to use a Runge-Kutta-Nyström solver to solve this problem and check the first integrals' variation. - -```julia -sol2 = solve(prob, DPRKN6()) # dt is not necessary, because unlike symplectic - # integrators DPRKN6 is adaptive -@show sol2.u |> length -analysis_plot(sol2, H, L) -``` - -Let's then try to solve the same problem by the `ERKN4` solver, which is specialized for sinusoid-like periodic function - -```julia -sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic - # integrators ERKN4 is adaptive -@show sol3.u |> length -analysis_plot(sol3, H, L) -``` - -We can see that `ERKN4` does a bad job for this problem, because this problem is not sinusoid-like. - -One advantage of using `DynamicalODEProblem` is that it can implicitly convert the second order ODE problem to a *normal* system of first order ODEs, which is solvable for other ODE solvers. Let's use the `Tsit5` solver for the next example. - -```julia -sol4 = solve(prob, Tsit5()) -@show sol4.u |> length -analysis_plot(sol4, H, L) -``` - -#### Note - -There is drifting for all the solutions, and high order methods are drifting less because they are more accurate. - -### Conclusion - ---- - -Symplectic integrator does not conserve the energy completely at all time, but the energy can come back. In order to make sure that the energy fluctuation comes back eventually, symplectic integrator has to have a fixed time step. 
Despite the energy variation, symplectic integrator conserves the angular momentum perfectly. - -Both Runge-Kutta-Nyström and Runge-Kutta integrator do not conserve energy nor the angular momentum, and the first integrals do not tend to come back. An advantage Runge-Kutta-Nyström integrator over symplectic integrator is that RKN integrator can have adaptivity. An advantage Runge-Kutta-Nyström integrator over Runge-Kutta integrator is that RKN integrator has less function evaluation per step. The `ERKN4` solver works best for sinusoid-like solutions. - -## Manifold Projection - -In this example, we know that energy and angular momentum should be conserved. We can achieve this through mainfold projection. As the name implies, it is a procedure to project the ODE solution to a manifold. Let's start with a base case, where mainfold projection isn't being used. - -```julia -using DiffEqCallbacks - -plot_orbit2(sol) = plot(sol,vars=(1,2), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals2(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab="Angular momentum variation") -end - -analysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L)) - -function hamiltonian(du,u,params,t) - q, p = u[1:2], u[3:4] - qdot(@view(du[1:2]), p, q, params, t) - pdot(@view(du[3:4]), p, q, params, t) -end - -prob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan) -sol_ = solve(prob2, RK4(), dt=1//5, adaptive=false) -analysis_plot2(sol_, H, L) -``` - -There is a significant fluctuation in the first integrals, when there is no mainfold projection. 
- -```julia -function first_integrals_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) -end - -cb = ManifoldProjection(first_integrals_manifold) -sol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb) -analysis_plot2(sol5, H, L) -``` - -We can see that thanks to the manifold projection, the first integrals' variation is very small, although we are using `RK4` which is not symplectic. But wait, what if we only project to the energy conservation manifold? - -```julia -function energy_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -energy_cb = ManifoldProjection(energy_manifold) -sol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb) -analysis_plot2(sol6, H, L) -``` - -There is almost no energy variation but angular momentum varies quite bit. How about only project to the angular momentum conservation manifold? - -```julia -function angular_manifold(residual,u) - residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -angular_cb = ManifoldProjection(angular_manifold) -sol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb) -analysis_plot2(sol7, H, L) -``` - -Again, we see what we expect. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/outer_solar_system.jmd b/tutorials/models/outer_solar_system.jmd deleted file mode 100644 index 18004417..00000000 --- a/tutorials/models/outer_solar_system.jmd +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: The Outer Solar System -author: Yingbo Ma, Chris Rackauckas ---- - -## Data - -The chosen units are: masses relative to the sun, so that the sun has mass $1$. We have taken $m_0 = 1.00000597682$ to take account of the inner planets. 
Distances are in astronomical units , times in earth days, and the gravitational constant is thus $G = 2.95912208286 \cdot 10^{−4}$. - -| planet | mass | initial position | initial velocity | -| --- | --- | --- | --- | -| Jupiter | $m_1 = 0.000954786104043$ |
  • −3.5023653
  • −3.8169847
  • −1.5507963
|
  • 0.00565429
  • −0.00412490
  • −0.00190589
-| Saturn | $m_2 = 0.000285583733151$ |
  • 9.0755314
  • −3.0458353
  • −1.6483708
|
  • 0.00168318
  • 0.00483525
  • 0.00192462
-| Uranus | $m_3 = 0.0000437273164546$ |
  • 8.3101420
  • −16.2901086
  • −7.2521278
|
  • 0.00354178
  • 0.00137102
  • 0.00055029
-| Neptune | $m_4 = 0.0000517759138449$ |
  • 11.4707666
  • −25.7294829
  • −10.8169456
|
  • 0.00288930
  • 0.00114527
  • 0.00039677
-| Pluto | $ m_5 = 1/(1.3 \cdot 10^8 )$ |
  • −15.5387357
  • −25.2225594
  • −3.1902382
|
  • 0.00276725
  • −0.00170702
  • −0.00136504
- -The data is taken from the book "Geometric Numerical Integration" by E. Hairer, C. Lubich and G. Wanner. - -```julia -using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools -gr() - -G = 2.95912208286e-4 -M = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8] -planets = ["Sun", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"] - -pos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357] -pos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594] -pos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382] -pos = ArrayPartition(pos_x,pos_y,pos_z) - -vel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725] -vel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702] -vel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504] -vel = ArrayPartition(vel_x,vel_y,vel_z) - -tspan = (0.,200_000) -``` - -The N-body problem's Hamiltonian is - -$$H(p,q) = \frac{1}{2}\sum_{i=0}^{N}\frac{p_{i}^{T}p_{i}}{m_{i}} - G\sum_{i=1}^{N}\sum_{j=0}^{i-1}\frac{m_{i}m_{j}}{\left\lVert q_{i}-q_{j} \right\rVert}$$ - -Here, we want to solve for the motion of the five outer planets relative to the sun, namely, Jupiter, Saturn, Uranus, Neptune and Pluto. - -```julia -const ∑ = sum -const N = 6 -potential(p, t, x, y, z, M) = -G*∑(i->∑(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N) -``` - -## Hamiltonian System - -`NBodyProblem` constructs a second order ODE problem under the hood. We know that a Hamiltonian system has the form of - -$$\dot{p} = -H_{q}(p,q)\quad \dot{q}=H_{p}(p,q)$$ - -For an N-body system, we can symplify this as: - -$$\dot{p} = -\nabla{V}(q)\quad \dot{q}=M^{-1}p.$$ - -Thus $\dot{q}$ is defined by the masses. We only need to define $\dot{p}$, and this is done internally by taking the gradient of $V$. Therefore, we only need to pass the potential function and the rest is taken care of. 
- -```julia -nprob = NBodyProblem(potential, M, pos, vel, tspan) -sol = solve(nprob,Yoshida6(), dt=100); -``` - -```julia -orbitplot(sol,body_names=planets) -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/feagin.jmd b/tutorials/ode_extras/feagin.jmd deleted file mode 100644 index 155c94d2..00000000 --- a/tutorials/ode_extras/feagin.jmd +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Feagin's Order 10, 12, and 14 Methods -author: Chris Rackauckas ---- - -DifferentialEquations.jl includes Feagin's explicit Runge-Kutta methods of orders 10/8, 12/10, and 14/12. These methods have such high order that it's pretty much required that one uses numbers with more precision than Float64. As a prerequisite reference on how to use arbitrary number systems (including higher precision) in the numerical solvers, please see the Solving Equations in With Chosen Number Types notebook. - -## Investigation of the Method's Error - -We can use Feagin's order 16 method as follows. Let's use a two-dimensional linear ODE. Like in the Solving Equations in With Chosen Number Types notebook, we change the initial condition to BigFloats to tell the solver to use BigFloat types. - -```julia -using DifferentialEquations -const linear_bigα = big(1.01) -f(u,p,t) = (linear_bigα*u) - -# Add analytical solution so that errors are checked -f_analytic(u0,p,t) = u0*exp(linear_bigα*t) -ff = ODEFunction(f,analytic=f_analytic) -prob = ODEProblem(ff,big(0.5),(0.0,1.0)) -sol = solve(prob,Feagin14(),dt=1//16,adaptive=false); -``` - -```julia -println(sol.errors) -``` - -Compare that to machine $\epsilon$ for Float64: - -```julia -eps(Float64) -``` - -The error for Feagin's method when the stepsize is 1/16 is 8 orders of magnitude below machine $\epsilon$! However, that is dependent on the stepsize. 
If we instead use adaptive timestepping with the default tolerances, we get - -```julia -sol =solve(prob,Feagin14()); -println(sol.errors); print("The length was $(length(sol))") -``` - -Notice that when the stepsize is much higher, the error goes up quickly as well. These super high order methods are best when used to gain really accurate approximations (using still modest timesteps). Some examples of where such precision is necessary is astrodynamics where the many-body problem is highly chaotic and thus sensitive to small errors. - -## Convergence Test - -The Order 14 method is awesome, but we need to make sure it's really that awesome. The following convergence test is used in the package tests in order to make sure the implementation is correct. Note that all methods have such tests in place. - -```julia -using DiffEqDevTools -dts = 1.0 ./ 2.0 .^(10:-1:4) -sim = test_convergence(dts,prob,Feagin14()) -``` - -For a view of what's going on, let's plot the simulation results. - -```julia -using Plots -gr() -plot(sim) -``` - -This is a clear trend indicating that the convergence is truly Order 14, which -is the estimated slope. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/monte_carlo_parameter_estim.jmd b/tutorials/ode_extras/monte_carlo_parameter_estim.jmd deleted file mode 100644 index dbcd243a..00000000 --- a/tutorials/ode_extras/monte_carlo_parameter_estim.jmd +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Monte Carlo Parameter Estimation From Data -author: Chris Rackauckas ---- - -First you want to create a problem which solves multiple problems at the same time. This is the Monte Carlo Problem. When the parameter estimation tools say it will take any DEProblem, it really means ANY DEProblem! - -So, let's get a Monte Carlo problem setup that solves with 10 different initial conditions. 
- -```julia -using DifferentialEquations, DiffEqParamEstim, Plots, Optim - -# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions - -# Set up Lotka-Volterra system -function pf_func(du,u,p,t) - du[1] = p[1] * u[1] - p[2] * u[1]*u[2] - du[2] = -3 * u[2] + u[1]*u[2] -end -p = [1.5,1.0] -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p) -``` - -Now for a MonteCarloProblem we have to take this problem and tell it what to do N times via the prob_func. So let's generate N=10 different initial conditions, and tell it to run the same problem but with these 10 different initial conditions each time: - -```julia -# Setting up to solve the problem N times (for the N different initial conditions) -N = 10; -initial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]] -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -``` - -We can check this does what we want by solving it: - -```julia -# Check above does what we want -sim = solve(monte_prob,Tsit5(),num_monte=N) -plot(sim) -``` - -num_monte=N means "run N times", and each time it runs the problem returned by the prob_func, which is always the same problem but with the ith initial condition. - -Now let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array. - -```julia -# Generate a dataset from these runs -data_times = 0.0:0.1:10.0 -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -data = Array(sim) -``` - -Here, data[i,j,k] is the same as sim[i,j,k] which is the same as sim[k][i,j] (where sim[k] is the kth solution). So data[i,j,k] is the jth timepoint of the ith variable in the kth trajectory. - -Now let's build a loss function. A loss function is some loss(sol) that spits out a scalar for how far from optimal we are. 
In the documentation I show that we normally do loss = L2Loss(t,data), but we can bootstrap off of this. Instead lets build an array of N loss functions, each one with the correct piece of data. - -```julia -# Building a loss function -losses = [L2Loss(data_times,data[:,:,i]) for i in 1:N] -``` - -So losses[i] is a function which computes the loss of a solution against the data of the ith trajectory. So to build our true loss function, we sum the losses: - -```julia -loss(sim) = sum(losses[i](sim[i]) for i in 1:N) -``` - -As a double check, make sure that loss(sim) outputs zero (since we generated the data from sim). Now we generate data with other parameters: - -```julia -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8]) -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -loss(sim) -``` - -and get a non-zero loss. So we now have our problem, our data, and our loss function... we have what we need. - -Put this into build_loss_objective. - -```julia -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - saveat=data_times) -``` - -Notice that I added the kwargs for solve into this. They get passed to an internal solve command, so then the loss is computed on N trajectories at data_times. - -Thus we take this objective function over to any optimization package. I like to do quick things in Optim.jl. Here, since the Lotka-Volterra equation requires positive parameters, I use Fminbox to make sure the parameters stay positive. I start the optimization with [1.3,0.9], and Optim spits out that the true parameters are: - -```julia -lower = zeros(2) -upper = fill(2.0,2) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) -``` - -```julia -result -``` - -Optim finds one but not the other parameter. 
- -I would run a test on synthetic data for your problem before using it on real data. Maybe play around with different optimization packages, or add regularization. You may also want to decrease the tolerance of the ODE solvers via - -```julia -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - abstol=1e-8,reltol=1e-8, - saveat=data_times) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) -``` - -```julia -result -``` - -if you suspect error is the problem. However, if you're having problems it's most likely not the ODE solver tolerance and mostly because parameter inference is a very hard optimization problem. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/ode_minmax.jmd b/tutorials/ode_extras/ode_minmax.jmd deleted file mode 100644 index e88b8f9d..00000000 --- a/tutorials/ode_extras/ode_minmax.jmd +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Finding Maxima and Minima of DiffEq Solutions -author: Chris Rackauckas ---- - -### Setup - -In this tutorial we will show how to use Optim.jl to find the maxima and minima of solutions. Let's take a look at the double pendulum: - -```julia -#Constants and setup -using OrdinaryDiffEq -initial = [0.01, 0.01, 0.01, 0.01] -tspan = (0.,100.) 
- -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -#Pass to solvers -poincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan) -``` - -```julia -sol = solve(poincare, Tsit5()) -``` - -In time, the solution looks like: - -```julia -using Plots; gr() -plot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000) -``` - -while it has the well-known phase-space plot: - -```julia -plot(sol, vars=(3,4), leg=false) -``` - -### Local Optimization - -Let's fine out what some of the local maxima and minima are. Optim.jl can be used to minimize functions, and the solution type has a continuous interpolation which can be used. Let's look for the local optima for the 4th variable around `t=20`. Thus our optimization function is: - -```julia -f = (t) -> sol(t,idxs=4) -``` - -`first(t)` is the same as `t[1]` which transforms the array of size 1 into a number. `idxs=4` is the same as `sol(first(t))[4]` but does the calculation without a temporary array and thus is faster. To find a local minima, we can simply call Optim on this function. Let's find a local minimum: - -```julia -using Optim -opt = optimize(f,18.0,22.0) -``` - -From this printout we see that the minimum is at `t=18.63` and the value is `-2.79e-2`. 
We can get these in code-form via: - -```julia -println(opt.minimizer) -println(opt.minimum) -``` - -To get the maximum, we just minimize the negative of the function: - -```julia -f = (t) -> -sol(first(t),idxs=4) -opt2 = optimize(f,0.0,22.0) -``` - -Let's add the maxima and minima to the plots: - -```julia -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([opt.minimizer],[opt.minimum],label="Local Min") -scatter!([opt2.minimizer],[-opt2.minimum],label="Local Max") -``` - -Brent's method will locally minimize over the full interval. If we instead want a local maxima nearest to a point, we can use `BFGS()`. In this case, we need to optimize a vector `[t]`, and thus dereference it to a number using `first(t)`. - -```julia -f = (t) -> -sol(first(t),idxs=4) -opt = optimize(f,[20.0],BFGS()) -``` - -### Global Optimization - -If we instead want to find global maxima and minima, we need to look somewhere else. For this there are many choices. A pure Julia option is BlackBoxOptim.jl, but I will use NLopt.jl. 
Following the NLopt.jl tutorial but replacing their function with out own: - -```julia -import NLopt, ForwardDiff - -count = 0 # keep track of # function evaluations - -function g(t::Vector, grad::Vector) - if length(grad) > 0 - #use ForwardDiff for the gradients - grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t) - end - sol(first(t),idxs=4) -end -opt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1) -NLopt.lower_bounds!(opt, [0.0]) -NLopt.upper_bounds!(opt, [40.0]) -NLopt.xtol_rel!(opt,1e-8) -NLopt.min_objective!(opt, g) -(minf,minx,ret) = NLopt.optimize(opt,[20.0]) -println(minf," ",minx," ",ret) -NLopt.max_objective!(opt, g) -(maxf,maxx,ret) = NLopt.optimize(opt,[20.0]) -println(maxf," ",maxx," ",ret) -``` - -```julia -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([minx],[minf],label="Global Min") -scatter!([maxx],[maxf],label="Global Max") -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/test.jmd b/tutorials/test.jmd deleted file mode 100644 index a17a9e18..00000000 --- a/tutorials/test.jmd +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Test -author: Chris Rackauckas ---- - -This is a test of the builder system. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/number_types.jmd b/tutorials/type_handling/number_types.jmd deleted file mode 100644 index f7c6ff07..00000000 --- a/tutorials/type_handling/number_types.jmd +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Solving Equations in With Julia-Defined Types -author: Chris Rackauckas ---- - -One of the nice things about DifferentialEquations.jl is that it is designed with Julia's type system in mind. What this means is, if you have properly defined a Number type, you can use this number type in DifferentialEquations.jl's algorithms! 
[Note that this is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as ODE.jl, Sundials.jl, and ODEInterface.jl are not compatible with some number systems.] - -DifferentialEquations.jl determines the numbers to use in its solvers via the types that are designated by `tspan` and the initial condition of the problem. It will keep the time values in the same type as tspan, and the solution values in the same type as the initial condition. [Note that adaptive timestepping requires that the time type is compaible with `sqrt` and `^` functions. Thus dt cannot be Integer or numbers like that if adaptive timestepping is chosen]. - -Let's solve the linear ODE first define an easy way to get ODEProblems for the linear ODE: - -```julia -using DifferentialEquations -f = (u,p,t) -> (p*u) -prob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01); -``` - -First let's solve it using Float64s. To do so, we just need to set u0 to a Float64 (which is done by the default) and dt should be a float as well. - -```julia -prob = prob_ode_linear -sol =solve(prob,Tsit5()) -println(sol) -``` - -Notice that both the times and the solutions were saved as Float64. Let's change the time to use rational values. Rationals are not compatible with adaptive time stepping since they do not have an L2 norm (this can be worked around by defining `internalnorm`, but rationals already explode in size!). To account for this, let's turn off adaptivity as well: - -```julia -prob = ODEProblem(f,1/2,(0//1,1//1),101//100); -sol = solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol) -``` - -Now let's do something fun. Let's change the solution to use `Rational{BigInt}` and print out the value at the end of the simulation. To do so, simply change the definition of the initial condition. - -```julia -prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100); -sol =solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol[end]) -``` - -That's one huge fraction! 
- -## Other Compatible Number Types - -#### BigFloats - -```julia -prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01)) -sol =solve(prob_ode_biglinear,Tsit5()) -println(sol[end]) -``` - -#### DoubleFloats.jl - -There's are Float128-like types. Higher precision, but fixed and faster than arbitrary precision. - -```julia -using DoubleFloats -prob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01)) -sol =solve(prob_ode_doublelinear,Tsit5()) -println(sol[end]) -``` - -#### ArbFloats - -These high precision numbers which are much faster than Bigs for less than 500-800 bits of accuracy. - -```julia -using ArbNumerics -prob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01)) -sol =solve(prob_ode_arbfloatlinear,Tsit5()) -println(sol[end]) -``` - -## Incompatible Number Systems - -#### DecFP.jl - -Next let's try DecFP. DecFP is a fixed-precision decimals library which is made to give both performance but known decimals of accuracy. Having already installed DecFP with `]add DecFP`, I can run the following: - -```julia -using DecFP -prob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01)) -sol =solve(prob_ode_decfplinear,Tsit5()) -println(sol[end]); println(typeof(sol[end])) -``` - -#### Decimals.jl - -Install with `]add Decimals`. - -```julia -using Decimals -prob_ode_decimallinear = ODEProblem(f,[decimal("1.0")]./[decimal("2.0")],(0//1,1//1),decimal(1.01)) -sol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails -println(sol[end]); println(typeof(sol[end])) -``` - -At the time of writing this, Decimals are not compatible. This is not on DifferentialEquations.jl's end, it's on partly on Decimal's end since it is not a subtype of Number. 
Thus it's not recommended you use Decimals with DifferentialEquations.jl - -## Conclusion - -As you can see, DifferentialEquations.jl can use arbitrary Julia-defined number systems in its arithmetic. If you need 128-bit floats, i.e. a bit more precision but not arbitrary, DoubleFloats.jl is a very good choice! For arbitrary precision, ArbNumerics are the most feature-complete and give great performance compared to BigFloats, and thus I recommend their use when high-precision (less than 512-800 bits) is required. DecFP is a great library for high-performance decimal numbers and works well as well. Other number systems could use some modernization. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/uncertainties.jmd b/tutorials/type_handling/uncertainties.jmd deleted file mode 100644 index 3583f0f4..00000000 --- a/tutorials/type_handling/uncertainties.jmd +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Numbers with Uncertainties -author: Mosè Giordano, Chris Rackauckas ---- - -The result of a measurement should be given as a number with an attached uncertainties, besides the physical unit, and all operations performed involving the result of the measurement should propagate the uncertainty, taking care of correlation between quantities. - -There is a Julia package for dealing with numbers with uncertainties: [`Measurements.jl`](https://github.com/JuliaPhysics/Measurements.jl). Thanks to Julia's features, `DifferentialEquations.jl` easily works together with `Measurements.jl` out-of-the-box. - -This notebook will cover some of the examples from the tutorial about classical Physics. 
- -## Caveat about `Measurement` type - -Before going on with the tutorial, we must point up a subtlety of `Measurements.jl` that you should be aware of: - -```julia -using Measurements - -5.23 ± 0.14 === 5.23 ± 0.14 -``` - -```julia -(5.23± 0.14) - (5.23 ± 0.14) -``` - -```julia -(5.23 ± 0.14) / (5.23 ± 0.14) -``` - -The two numbers above, even though have the same nominal value and the same uncertainties, are actually two different measurements that only by chance share the same figures and their difference and their ratio have a non-zero uncertainty. It is common in physics to get very similar, or even equal, results for a repeated measurement, but the two measurements are not the same thing. - -Instead, if you have *one measurement* and want to perform some operations involving it, you have to assign it to a variable: - -```julia -x = 5.23 ± 0.14 -x === x -``` - -```julia -x - x -``` - -```julia -x / x -``` - -## Radioactive Decay of Carbon-14 - -The rate of decay of carbon-14 is governed by a first order linear ordinary differential equation - -$$\frac{\mathrm{d}u(t)}{\mathrm{d}t} = -\frac{u(t)}{\tau}$$ - -where $\tau$ is the mean lifetime of carbon-14, which is related to the half-life $t_{1/2} = (5730 \pm 40)$ years by the relation $\tau = t_{1/2}/\ln(2)$. - -```julia -using DifferentialEquations, Measurements, Plots - -# Half-life and mean lifetime of radiocarbon, in years -t_12 = 5730 ± 40 -τ = t_12 / log(2) - -#Setup -u₀ = 1 ± 0 -tspan = (0.0, 10000.0) - -#Define the problem -radioactivedecay(u,p,t) = - u / τ - -#Pass to solver -prob = ODEProblem(radioactivedecay, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-8) - -# Analytic solution -u = exp.(- sol.t / τ) - -plot(sol.t, sol.u, label = "Numerical", xlabel = "Years", ylabel = "Fraction of Carbon-14") -plot!(sol.t, u, label = "Analytic") -``` - -The two curves are perfectly superimposed, indicating that the numerical solution matches the analytic one. 
We can check that also the uncertainties are correctly propagated in the numerical solution: - -```julia -println("Quantity of carbon-14 after ", sol.t[11], " years:") -println("Numerical: ", sol[11]) -println("Analytic: ", u[11]) -``` - -Both the value of the numerical solution and its uncertainty match the analytic solution within the requested tolerance. We can also note that close to 5730 years after the beginning of the decay (half-life of the radioisotope), the fraction of carbon-14 that survived is about 0.5. - -## Simple pendulum - -### Small angles approximation - -The next problem we are going to study is the simple pendulum in the approximation of small angles. We address this simplified case because there exists an easy analytic solution to compare. - -The differential equation we want to solve is - -$$\ddot{\theta} + \frac{g}{L} \theta = 0$$ - -where $g = (9.79 \pm 0.02)~\mathrm{m}/\mathrm{s}^2$ is the gravitational acceleration measured where the experiment is carried out, and $L = (1.00 \pm 0.01)~\mathrm{m}$ is the length of the pendulum. - -When you set up the problem for `DifferentialEquations.jl` remember to define the measurements as variables, as seen above. - -```julia -using DifferentialEquations, Measurements, Plots - -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*θ -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -# Analytic solution -u = u₀[2] .* cos.(sqrt(g / L) .* sol.t) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -plot!(sol.t, u, label = "Analytic") -``` - -Also in this case there is a perfect superimposition between the two curves, including their uncertainties. 
- -We can also have a look at the difference between the two solutions: - -```julia -plot(sol.t, getindex.(sol.u, 2) .- u, label = "") -``` - -## Arbitrary amplitude - -Now that we know how to solve differential equations involving numbers with uncertainties we can solve the simple pendulum problem without any approximation. This time the differential equation to solve is the following: - -$$\ddot{\theta} + \frac{g}{L} \sin(\theta) = 0$$ - -```julia -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L) * sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -``` - -We note that in this case the period of the oscillations is not constant. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/unitful.jmd b/tutorials/type_handling/unitful.jmd deleted file mode 100644 index 17105b7c..00000000 --- a/tutorials/type_handling/unitful.jmd +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Unit Checked Arithmetic via Unitful.jl -author: Chris Rackauckas ---- - -Units and dimensional analysis are standard tools across the sciences for checking the correctness of your equation. However, most ODE solvers only allow for the equation to be in dimensionless form, leaving it up to the user to both convert the equation to a dimensionless form, punch in the equations, and hopefully not make an error along the way. - -DifferentialEquations.jl allows for one to use Unitful.jl to have unit-checked arithmetic natively in the solvers. 
Given the dispatch implementation of Unitful, this has little overhead. - -## Using Unitful - -To use Unitful, you need to have the package installed. Then you can add units to your variables. For example: - -```julia; wrap=false -using Unitful -t = 1.0u"s" -``` - -Notice that `t` is a variable with units in seconds. If we make another value with seconds, they can add - -```julia; wrap=false -t2 = 1.02u"s" -t+t2 -``` - -and they can multiply: - -```julia; wrap=false -t*t2 -``` - -You can even do rational roots: - -```julia; wrap=false -sqrt(t) -``` - -Many operations work. These operations will check to make sure units are correct, and will throw an error for incorrect operations: - -```julia; wrap=false -t + sqrt(t) -``` - -## Using Unitful with DifferentialEquations.jl - -Just like with other number systems, you can choose the units for your numbers by simply specifying the units of the initial condition and the timestep. For example, to solve the linear ODE where the variable has units of Newtons and `t` is in seconds, we would use: - -```julia; wrap=false -using DifferentialEquations -f = (y,p,t) -> 0.5*y -u0 = 1.5u"N" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) -``` - -Notice that we received a unit mismatch error. This is correct! Remember that for an ODE: - -$$\frac{dy}{dt} = f(t,y)$$ - -we must have that `f` is a rate, i.e. `f` is a change in `y` per unit time. So we need to fix the units of `f` in our example to be `N/s`. Notice that we then do not receive an error if we do the following: - -```julia; wrap=false -f = (y,p,t) -> 0.5*y/3.0u"s" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) -``` - -This gives a normal solution object. 
Notice that the values are all with the correct units: - -```julia; wrap=false -print(sol[:]) -``` - -We can plot the solution by removing the units: - -```julia; wrap=false -using Plots -gr() -plot(ustrip(sol.t),ustrip(sol[:]),lw=3) -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/weave_tutorials.jl b/weave_tutorials.jl new file mode 100644 index 00000000..a6052620 --- /dev/null +++ b/weave_tutorials.jl @@ -0,0 +1,16 @@ +using SciMLTutorials +target = ARGS[1] +if isdir(target) + if !isfile(joinpath(target, "Project.toml")) + error("Cannot weave folder $(target) without Project.toml!") + end + println("Weaving the $(target) folder") + SciMLTutorials.weave_folder(target) +elseif isfile(target) + folder = dirname(target)[11:end] # remove the tutorials/ + file = basename(target) + println("Weaving $(folder)/$(file)") + SciMLTutorials.weave_file(folder, file) +else + error("Unable to find weaving target $(target)!") +end