diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml new file mode 100644 index 00000000..3494a9f1 --- /dev/null +++ b/.JuliaFormatter.toml @@ -0,0 +1,3 @@ +style = "sciml" +format_markdown = true +format_docstrings = true diff --git a/.buildkite/.gitignore b/.buildkite/.gitignore new file mode 100644 index 00000000..46de5d5e --- /dev/null +++ b/.buildkite/.gitignore @@ -0,0 +1 @@ +ssh_deploy.key diff --git a/.buildkite/0_webui.yml b/.buildkite/0_webui.yml new file mode 100644 index 00000000..af44a7d7 --- /dev/null +++ b/.buildkite/0_webui.yml @@ -0,0 +1,28 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":unlock: Launch tutorials build if hash check successful" + branches: "!gh-pages" + plugins: + - staticfloat/cryptic#v2: + signed_pipelines: + - pipeline: .buildkite/launch_tutorials.yml + signature_file: .buildkite/launch_tutorials.yml.signature + inputs: + - .buildkite/run_tutorial.yml + - .buildkite/publish_tutorials_output.sh + allow_hash_override: true + command: "true" + + - label: ":runner: Dynamically launch test suite" + plugins: + - staticfloat/forerunner: + # This will create one job overall, throwing all path information away + watch: + - "src/**/*.jl" + - "src/*.jl" + - "**/*.toml" + target: .buildkite/test_sciml.yml + target_type: simple diff --git a/.buildkite/cryptic_repo_keys/.gitignore b/.buildkite/cryptic_repo_keys/.gitignore new file mode 100644 index 00000000..f84d0896 --- /dev/null +++ b/.buildkite/cryptic_repo_keys/.gitignore @@ -0,0 +1,7 @@ + +# Ignore the unencrypted repo_key +repo_key + +# Ignore any agent keys (public or private) we have stored +agent_key* + diff --git a/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 b/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 new file mode 100644 index 00000000..6065ce29 Binary files /dev/null and b/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 differ diff --git a/.buildkite/launch_test_sciml.yml b/.buildkite/launch_test_sciml.yml new file mode 100644 index 
00000000..85079b4a --- /dev/null +++ b/.buildkite/launch_test_sciml.yml @@ -0,0 +1,16 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":runner: Dynamically launch test_sciml" + branches: "!gh-pages" + plugins: + - staticfloat/forerunner: + # This will create one job overall, throwing all path information away + watch: + - "src/**/*.jl" + - "src/*.jl" + - "**/*.toml" + target: .buildkite/test_sciml.yml + target_type: simple diff --git a/.buildkite/launch_tutorials.yml b/.buildkite/launch_tutorials.yml new file mode 100644 index 00000000..81eebe74 --- /dev/null +++ b/.buildkite/launch_tutorials.yml @@ -0,0 +1,19 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":runner: Dynamically launch run_tutorial.yml" + branches: "!gh-pages" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} + depends_on: + plugins: + - staticfloat/forerunner: + # This will create one job per project + watch: + - tutorials/**/*.jmd + - tutorials/**/*.toml + path_processor: .buildkite/path_processors/project-coalescing + target: .buildkite/run_tutorial.yml + target_type: template \ No newline at end of file diff --git a/.buildkite/launch_tutorials.yml.signature b/.buildkite/launch_tutorials.yml.signature new file mode 100644 index 00000000..8f286f74 --- /dev/null +++ b/.buildkite/launch_tutorials.yml.signature @@ -0,0 +1,2 @@ +Salted__ +HX+D;HN2qhb=c$J0~0~dх3A܉YrB{?󒟭z P \ No newline at end of file diff --git a/.buildkite/path_processors/project-coalescing b/.buildkite/path_processors/project-coalescing new file mode 100755 index 00000000..26a0f5d8 --- /dev/null +++ b/.buildkite/path_processors/project-coalescing @@ -0,0 +1,62 @@ +#!/bin/bash + +# When a `.jmd` file is modified, it gets rewritten by itself; but when a `.toml` file +# (such as a `Project.toml` or a `Manifest.toml`) gets modified, we rebuild the entire +# directory. 
To avoid double-building, we coalesce all changes here, by converting +# changed files that end in `.toml` to their directory, then dropping all other files +# within that folder. + +# This will hold all files that need to be rebuilt, keyed by path and pointing to their +# containing project +declare -A FILES + +# This will hold all projects that need to be rebuilt, and will allow us to suppress +# values from FILES_TO_RUN +declare -A PROJECTS + +# Helper function to find the directory that contains the `Project.toml` for this file +function find_project() { + d="${1}" + # We define a basecase, that the path must begin with `tutorials` and is not allowed + # to move outside of that subtree. + while [[ "${d}" =~ tutorials/.* ]]; do + if [[ -f "${d}/Project.toml" ]]; then + echo "${d}" + return + fi + d="$(dirname "${d}")" + done +} + +# For each file, find its project, then if its a `.jmd` file, we add it to `FILES` +# If it's a `.toml` file, we add it to `PROJECTS`. +for f in "$@"; do + proj=$(find_project "${f}") + if [[ -z "${proj}" ]]; then + buildkite-agent annotate "Unable to find project for ${f}" --style "error" + continue + fi + + if [[ "${f}" == *.jmd ]]; then + FILES["${f}"]="${proj}" + elif [[ "${f}" == *.toml ]]; then + PROJECTS["${proj}"]=1 + else + buildkite-agent annotate "Unknown weave type for file ${f}" --style "error" + fi +done + +# We're going to emit the project directories first: +BUILD_TARGETS="${!PROJECTS[@]}" + +# But we're also going to emit any single files whose projects are _not_ contained +# in the projects we're already building +for f in "${!FILES[@]}"; do + proj=${FILES[$f]} + if ! 
[ ${PROJECTS[$proj]+x} ]; then + BUILD_TARGETS="${BUILD_TARGETS} ${f}" + fi +done + +# Output the build targets +echo "${BUILD_TARGETS}" diff --git a/.buildkite/publish_tutorials_output.sh b/.buildkite/publish_tutorials_output.sh new file mode 100755 index 00000000..c4b0535f --- /dev/null +++ b/.buildkite/publish_tutorials_output.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Ensure that our git wants to talk to github without prompting +mkdir -p ~/.ssh +ssh-keyscan github.com >> ~/.ssh/known_hosts +git config --global user.email "buildkite@julialang.org" +git config --global user.name "SciML Tutorials CI" + +# Clone SciMLTutorialsOutput to temporary directory +temp_dir=$(mktemp -d) +git -C "${temp_dir}" clone git@github.com:SciML/SciMLTutorialsOutput . + +# Copy our output artifacts into it: +for d in docs html notebook pdf script markdown; do + cp -vRa "${d}/" "${temp_dir}" +done +cp -va *.md *.bib "${temp_dir}" + +# Commit the result up to output +set -e +git -C "${temp_dir}" add . +git -C "${temp_dir}" commit -m "Automatic build\nPublished by build of: ${BUILDKITE_REPO%.git}/commit/${BUILDKITE_COMMIT}" +git -C "${temp_dir}" push + +rm -rf "${temp_dir}" diff --git a/.buildkite/run_tutorial.yml b/.buildkite/run_tutorial.yml new file mode 100644 index 00000000..1bf05b99 --- /dev/null +++ b/.buildkite/run_tutorial.yml @@ -0,0 +1,94 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + arch: "x86_64" + +# This is a pipeline that weaves a tutorial, then uploads the resultant +# .PDF and other reports as (buildkite, not Julia) artifacts. The `coppermind` +# configuration memoizes the result, so that identical inputs don't get +# weaved multiple times. 
+steps: + - label: ":hammer: {PATH}" + key: "tutorial-{SANITIZED_PATH}" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} + plugins: + - staticfloat/cryptic#v2: + variables: + - BUILDKITE_S3_ACCESS_KEY_ID="U2FsdGVkX1/ckce1vUF8A17rHLxcAlAou4aokaeS8YL6omsA1Vq1IDZko5cL1Z+t" + - BUILDKITE_S3_SECRET_ACCESS_KEY="U2FsdGVkX1+SPF81nkK7KQ64DsafSl0qq2iG7BsQs1xlTYEtZV3MqQl3l/NWaiocaEywZZFbAB5zpnKPD0xHTQ==" + - BUILDKITE_S3_DEFAULT_REGION="U2FsdGVkX1/cORlxhXcxhja2JkqC0f8RmaGYxvGBbEg=" + - JuliaCI/julia#v1: + version: 1.8 + - staticfloat/sandbox: + rootfs_url: "https://jc-rootfs-images.s3.amazonaws.com/aws_uploader-2021-11-12.x86_64.tar.gz" + rootfs_treehash: "986217e5b36efd3b3b91ed90df8e36d628cf543f" + workspaces: + # Include the julia we just downloaded + - "/cache/julia-buildkite-plugin:/cache/julia-buildkite-plugin" + - staticfloat/coppermind#v1: + inputs: + # We are sensitive to the actual tutorial changing + - {PATH} + # We are sensitive to the source code of this package changing + - src/**/*.jl + # We are sensitive to our overall dependencies changing + - ./*.toml + outputs: + #- html/**/*.html + - markdown/**/figures/*.png + - markdown/**/*.md + - notebook/**/*.ipynb + - pdf/**/*.pdf + - script/**/*.jl + s3_prefix: s3://julialang-buildkite-artifacts/scimltutorials + timeout_in_minutes: 1000 + commands: | + # Instantiate, to install the overall project dependencies + echo "--- Instantiate" + julia --project=. -e 'using Pkg; Pkg.instantiate(); Pkg.build()' + + # Run tutorial + echo "+++ Run tutorial for {PATH}" + julia --project=. 
weave_tutorials.jl "{PATH}" + + - label: ":rocket: Publish {PATH}" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} + plugins: + - staticfloat/cryptic#v2: + variables: + - BUILDKITE_S3_ACCESS_KEY_ID="U2FsdGVkX1/ckce1vUF8A17rHLxcAlAou4aokaeS8YL6omsA1Vq1IDZko5cL1Z+t" + - BUILDKITE_S3_SECRET_ACCESS_KEY="U2FsdGVkX1+SPF81nkK7KQ64DsafSl0qq2iG7BsQs1xlTYEtZV3MqQl3l/NWaiocaEywZZFbAB5zpnKPD0xHTQ==" + - BUILDKITE_S3_DEFAULT_REGION="U2FsdGVkX1/cORlxhXcxhja2JkqC0f8RmaGYxvGBbEg=" + files: + - .buildkite/ssh_deploy.key + - JuliaCI/julia#v1: + version: 1.8 + - staticfloat/sandbox: + rootfs_url: "https://jc-rootfs-images.s3.amazonaws.com/aws_uploader-2021-11-12.x86_64.tar.gz" + rootfs_treehash: "986217e5b36efd3b3b91ed90df8e36d628cf543f" + workspaces: + # Include the julia we just downloaded + - "/cache/julia-buildkite-plugin:/cache/julia-buildkite-plugin" + # Use coppermind to download the tutorial results that were calculated in the + # weaving job above. Note we still list `outputs` here, since we have the + # option to extract only a subset of them here. + - staticfloat/coppermind#v1: + input_from: "tutorial-{SANITIZED_PATH}" + outputs: + #- html/**/*.html + - markdown/**/figures/*.png + - markdown/**/*.md + - notebook/**/*.ipynb + - pdf/**/*.pdf + - script/**/*.jl + s3_prefix: s3://julialang-buildkite-artifacts/scimltutorials + - staticfloat/ssh-agent: + keyfiles: + - .buildkite/ssh_deploy.key + commands: .buildkite/publish_tutorials_output.sh + # Don't run this unless we're on the master branch, and not until the actual weave + # command has had a chance to run. 
+ depends_on: "tutorial-{SANITIZED_PATH}" + branches: "master" diff --git a/.buildkite/ssh_deploy.key.encrypted b/.buildkite/ssh_deploy.key.encrypted new file mode 100644 index 00000000..9e0edc3a Binary files /dev/null and b/.buildkite/ssh_deploy.key.encrypted differ diff --git a/.buildkite/test_sciml.yml b/.buildkite/test_sciml.yml new file mode 100644 index 00000000..ca906117 --- /dev/null +++ b/.buildkite/test_sciml.yml @@ -0,0 +1,24 @@ +agents: + queue: "juliaecosystem" + arch: "x86_64" + +steps: + - label: ":julia: Run tests on 1.8" + plugins: + - JuliaCI/julia#v1: + version: 1.8 + - JuliaCI/julia-test#v1: + timeout_in_minutes: 20 + artifact_paths: + # Upload .html + - "html/Testing/*.html" + # Upload markdown + - "markdown/Testing/*.md" + # Upload notebook + - "notebook/Testing/*.ipynb" + # Upload .pdf files + - "pdf/Testing/*.pdf" + # Upload Julia script + - "script/Testing/*.jl" + agents: + queue: "juliaecosystem" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..700707ce --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml index d1162ce1..c3e22990 100644 --- a/.github/workflows/CompatHelper.yml +++ b/.github/workflows/CompatHelper.yml @@ -2,25 +2,25 @@ name: CompatHelper on: schedule: - - cron: '00 * * * *' + - cron: '00 00 * * *' issues: types: [opened, reopened] jobs: build: - runs-on: ${{ matrix.os }} - strategy: - matrix: - julia-version: [1.2.0] - julia-arch: [x86] - os: [ubuntu-latest] + runs-on: ubuntu-latest steps: - - uses: julia-actions/setup-julia@latest - with: - version: ${{ matrix.julia-version }} + - uses: actions/checkout@v5 - name: Pkg.add("CompatHelper") 
run: julia -e 'using Pkg; Pkg.add("CompatHelper")' - name: CompatHelper.main() env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: julia -e 'using CompatHelper; CompatHelper.main()' \ No newline at end of file + run: | + julia -e ' + using CompatHelper + dirs = filter( + d -> isdir(d) && isfile(joinpath(d, "Project.toml")), + readdir("tutorials"; join=true), + ) + CompatHelper.main(; subdirs=["", dirs...])' diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml new file mode 100644 index 00000000..f49313b6 --- /dev/null +++ b/.github/workflows/TagBot.yml @@ -0,0 +1,15 @@ +name: TagBot +on: + issue_comment: + types: + - created + workflow_dispatch: +jobs: + TagBot: + if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot' + runs-on: ubuntu-latest + steps: + - uses: JuliaRegistries/TagBot@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + ssh: ${{ secrets.DOCUMENTER_KEY }} diff --git a/.gitignore b/.gitignore index 031fc290..06b4ac54 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,18 @@ -*.jl.cov -*.jl.*.cov -*.jl.mem .ipynb_checkpoints +*/.ipynb_checkpoints/* *.tmp *.aux *.log *.out *.tex -Manifest.toml +tmp*/ +gks.svg +/*/*/jl_*/ +/Manifest.toml + +# We're going to store these in a separate repository now +html/ +script/ +pdf/ +notebook/ +markdown/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 00000000..d33fe3aa --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,8 @@ +include: https://raw.githubusercontent.com/SciML/RebuildAction/master/rebuild.yml +variables: + CONTENT_DIR: tutorials + EXCLUDE: exercises/02-workshop_solutions, models/06-pendulum_bayesian_inference + GITHUB_REPOSITORY: SciML/SciMLTutorials.jl + GPU_TAG: nvidia-benchmark + NEEDS_GPU: advanced/01-beeler_reuter + TAGS: nvidia-benchmark diff --git a/CNAME b/CNAME deleted file mode 100644 index ec163fd0..00000000 --- a/CNAME +++ /dev/null @@ -1 +0,0 @@ -tutorials.juliadiffeq.org \ No newline at end of file diff --git a/LICENSE.md 
b/LICENSE.md index 5946f9e2..6ec510bc 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -The DiffEqTutorials.jl package is licensed under the MIT "Expat" License: +The SciMLTutorials.jl package is licensed under the MIT "Expat" License: > Copyright (c) 2016: ChrisRackauckas. > @@ -19,4 +19,3 @@ The DiffEqTutorials.jl package is licensed under the MIT "Expat" License: > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE > SOFTWARE. -> diff --git a/Project.toml b/Project.toml index 7e21deaf..d8316660 100644 --- a/Project.toml +++ b/Project.toml @@ -1,86 +1,18 @@ -name = "DiffEqTutorials" -uuid = "6d1b261a-3be8-11e9-3f2f-0b112a9a8436" +name = "SciMLTutorials" +uuid = "30cb0354-2223-46a9-baa0-41bdcfbe0178" authors = ["Chris Rackauckas "] -version = "0.2.0" +version = "1.0.0" [deps] -AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" -ArbNumerics = "7e558dbc-694d-5a72-987c-6f4ebed21442" -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -CUDAnative = "be33ccc6-a3ff-5ff2-a52e-74243cff1e17" -Cairo = "159f3aea-2a34-519c-b102-8c37f9878175" -CuArrays = "3a865a2d-5b23-5a0f-bc46-62713ec82fae" -DecFP = "55939f99-70c6-5e9b-8bb0-5071ed7d61fd" -Decimals = "abce61dc-4473-55a0-ba07-351d65e31d42" -DiffEqBayes = "ebbdde9d-f333-5424-9be2-dbf1e9acfb5e" -DiffEqBiological = "eb300fae-53e8-50a0-950c-e21f52c2b7e0" -DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" -DiffEqDevTools = "f3b72e0c-5b89-59e1-b016-84e28bfd966d" -DiffEqOperators = "9fdde737-9c7f-55bf-ade8-46b3f136cc48" -DiffEqParamEstim = "1130ab10-4a5a-5621-a13d-e4788d82bd4c" -DiffEqPhysics = "055956cb-9e8b-5191-98cc-73ae4a59e68a" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" -DoubleFloats = "497a8b3b-efae-58df-a0af-a86822472b78" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" IJulia = 
"7073ff75-c697-5162-941a-fcdaad2a7d2a" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" -Latexify = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" -ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" -Optim = "429524aa-4258-5aef-a3af-852621145aeb" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -ParameterizedFunctions = "65888b18-ceab-5e60-b2b9-181511a3b968" +Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee" -RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" -SparseDiffTools = "47a9eef4-7e08-11e9-0b38-333d64bd3804" -SparsityDetection = "684fba80-ace3-11e9-3d08-3bc7ed6f96df" -StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" -Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" -Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" Weave = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" [compat] -AlgebraicMultigrid = "0.2" -ArbNumerics = "1.0" -BenchmarkTools = "0.4" -CUDAnative = "2.5" -Cairo = "0.8, 1.0" -CuArrays = "1.4" -DecFP = "0.4" -Decimals = "0.4" -DiffEqBayes = "2.1" -DiffEqBiological = "4.0" -DiffEqCallbacks = "2.9" -DiffEqDevTools = "2.15" -DiffEqOperators = "4.3" -DiffEqParamEstim = "1.8" -DiffEqPhysics = "3.2" -DifferentialEquations = "6.8" -Distributions = "0.21" -DoubleFloats = "0.9, 1.0" -ForwardDiff = "0.10" IJulia = "1.20" -Latexify = "0.12" -Measurements = "2.1" -ModelingToolkit = "0.9, 0.10, 1.0" -NLsolve = "4.2" -Optim = "0.19" -OrdinaryDiffEq = "5.23" -ParameterizedFunctions = "4.2" -Plots = "0.27, 0.28" -PyPlot = "2.8" -RecursiveArrayTools = "1.0" -SparseDiffTools = "0.10, 1.0" -SparsityDetection = "0.1" -StaticArrays = "0.10, 0.11, 0.12" -StatsPlots = "0.12, 0.13" -Sundials = 
"3.8" -Unitful = "0.17, 0.18" -Weave = "0.9" -julia = "1" +Plots = "1.6" +Weave = "0.10" +julia = "1.6" diff --git a/README.md b/README.md index 8217f58d..10971dbd 100644 --- a/README.md +++ b/README.md @@ -1,83 +1,103 @@ -# DiffEqTutorials.jl +# SciMLTutorials.jl: Tutorials for Scientific Machine Learning and Differential Equations -[![Join the chat at https://gitter.im/JuliaDiffEq/Lobby](https://badges.gitter.im/JuliaDiffEq/Lobby.svg)](https://gitter.im/JuliaDiffEq/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Join the chat at https://julialang.zulipchat.com #sciml-bridged](https://img.shields.io/static/v1?label=Zulip&message=chat&color=9558b2&labelColor=389826)](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged) +[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](http://tutorials.sciml.ai/stable/) +[![Global Docs](https://img.shields.io/badge/docs-SciML-blue.svg)](https://docs.sciml.ai/dev/highlevels/learning_resources/#SciMLTutorials) -DiffEqTutorials.jl holds PDFs, webpages, and interactive Jupyter notebooks -showing how to utilize the software in the JuliaDiffEq ecosystem. 
This set of -tutorials was made to complement the -[documentation](http://docs.juliadiffeq.org/dev/) and the -[devdocs](http://devdocs.juliadiffeq.org/dev/) +[![Build status](https://badge.buildkite.com/8a39c2e1b44511eb84bdcd9019663cad757ae2479abd340508.svg)](https://buildkite.com/julialang/scimltutorials-dot-jl) + +[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac) +[![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) + +SciMLTutorials.jl holds PDFs, webpages, and interactive Jupyter notebooks +showing how to utilize the software in the [SciML Scientific Machine Learning ecosystem](https://sciml.ai/). +This set of tutorials was made to complement the [documentation](https://sciml.ai/documentation/) +and the [devdocs](http://devdocs.sciml.ai/latest/) by providing practical examples of the concepts. For more details, please consult the docs. +#### Note: this library has been deprecated and its tutorials have been moved to the repos of the respective packages. It may be revived in the future if there is a need for longer-form tutorials! + +## Results + +To view the SciML Tutorials, go to [tutorials.sciml.ai](https://tutorials.sciml.ai/stable/). By default, this +will lead to the latest tagged version of the tutorials. To see the in-development version of the tutorials, go to +[https://tutorials.sciml.ai/dev/](https://tutorials.sciml.ai/dev/). + +Static outputs in pdf, markdown, and html reside in [SciMLTutorialsOutput](https://github.com/SciML/SciMLTutorialsOutput). 
+ +## Video Tutorial + +[![Video Tutorial](https://user-images.githubusercontent.com/1814174/36342812-bdfd0606-13b8-11e8-9eff-ff219de909e5.PNG)](https://youtu.be/KPEqYtEd-zY) + ## Interactive Notebooks -To run the tutorials interactively via Jupyter notebooks, install the package -and open the tutorials like: +To generate the interactive notebooks, first install the SciMLTutorials, instantiate the +environment, and then run `SciMLTutorials.open_notebooks()`. This looks as follows: ```julia -using Pkg -pkg"add https://github.com/JuliaDiffEq/DiffEqTutorials.jl" -using DiffEqTutorials -DiffEqTutorials.open_notebooks() +]add SciMLTutorials#master +]activate SciMLTutorials +]instantiate +using SciMLTutorials +SciMLTutorials.open_notebooks() ``` -## Video Tutorial +The tutorials will be generated at your `pwd()` in a folder called `generated_notebooks`. -[![Video Tutorial](https://user-images.githubusercontent.com/1814174/36342812-bdfd0606-13b8-11e8-9eff-ff219de909e5.PNG)](https://youtu.be/KPEqYtEd-zY) +Note that when running the tutorials, the packages are not automatically added. Thus you +will need to add the packages manually or use the internal Project/Manifest tomls to +instantiate the correct packages. This can be done by activating the folder of the tutorials. 
+For example, + +```julia +using Pkg +Pkg.activate(joinpath(pkgdir(SciMLTutorials),"tutorials","models")) +Pkg.instantiate() +``` -## Table of Contents - -- Introduction - - [Introduction to DifferentialEquations.jl through ODEs](http://tutorials.juliadiffeq.org/html/introduction/01-ode_introduction.html) - - [Detecting Stiffness and Choosing an ODE Algorithm](http://tutorials.juliadiffeq.org/html/introduction/02-choosing_algs.html) - - [Optimizing your DiffEq Code](http://tutorials.juliadiffeq.org/html/introduction/03-optimizing_diffeq_code.html) - - [Callbacks and Event Handling](http://tutorials.juliadiffeq.org/html/introduction/04-callbacks_and_events.html) - - [Formatting Plots](http://tutorials.juliadiffeq.org/html/introduction/05-formatting_plots.html) -- Exercise Sheets - - [DifferentialEquations.jl Workshop Exercises](http://tutorials.juliadiffeq.org/html/exercises/01-workshop_exercises.html) - - [DifferentialEquations.jl Workshop Exercise Solutions](http://tutorials.juliadiffeq.org/html/exercises/02-workshop_solutions.html) -- Modeling Examples - - [Classical Physics Models](http://tutorials.juliadiffeq.org/html/models/01-classical_physics.html) - - [Conditional Dosing Example](http://tutorials.juliadiffeq.org/html/models/02-conditional_dosing.html) - - [DiffEqBiological Tutorial I: Introduction](http://tutorials.juliadiffeq.org/html/models/03-diffeqbio_I_introduction.html) - - [DiffEqBiological Tutorial II: Network Properties API](http://tutorials.juliadiffeq.org/html/models/04-diffeqbio_II_networkproperties.html) - - [DiffEqBiological Tutorial III: Steady-States and Bifurcations](http://tutorials.juliadiffeq.org/html/models/04b-diffeqbio_III_steadystates.html) - - [Kepler Problem Orbit](http://tutorials.juliadiffeq.org/html/models/05-kepler_problem.html) - - [Bayesian Inference of Pendulum Parameters](http://tutorials.juliadiffeq.org/html/models/06-pendulum_bayesian_inference.html) -- Advanced ODE Features - - [ModelingToolkit.jl, An IR and Compiler for 
Scientific Models](http://tutorials.juliadiffeq.org/html/ode_extras/01-ModelingToolkit.html) - - [Feagin's Order 10, 12, and 14 Methods](http://tutorials.juliadiffeq.org/html/ode_extras/02-feagin.html) - - [Finding Maxima and Minima of DiffEq Solutions](http://tutorials.juliadiffeq.org/html/ode_extras/03-ode_minmax.html) - - [Monte Carlo Parameter Estimation from Data](http://tutorials.juliadiffeq.org/html/ode_extras/04-monte_carlo_parameter_estim.html) -- Type Handling - - [Solving Equations with Julia-Defined Types](http://tutorials.juliadiffeq.org/html/type_handling/01-number_types.html) - - [Numbers with Uncertainties](http://tutorials.juliadiffeq.org/html/type_handling/02-uncertainties.html) - - [Unit Check Arithmetic via Unitful.jl](http://tutorials.juliadiffeq.org/html/type_handling/03-unitful.html) -- Advanced - - [A 2D Cardiac Electrophysiology Model (CUDA-accelerated PDE solver)](http://tutorials.juliadiffeq.org/html/advanced/01-beeler_reuter.html) - - [Solving Stiff Equations](http://tutorials.juliadiffeq.org/html/advanced/02-advanced_ODE_solving.html) +will add all of the packages required to run any tutorial in the `models` folder. ## Contributing -First of all, make sure that your current directory is `DiffEqTutorials`. All -of the files are generated from the Weave.jl files in the `tutorials` folder. +All of the files are generated from the Weave.jl files in the `tutorials` folder. The generation process runs automatically, +and thus one does not necessarily need to test the Weave process locally. Instead, simply open a PR that adds/updates a +file in the "tutorials" folder and the PR will generate the tutorial on demand. Its artifacts can then be inspected in the +Buildkite as described below before merging. Note that it will use the Project.toml and Manifest.toml of the subfolder, so +any changes to dependencies requires that those are updated. 
+ +### Reporting Bugs and Issues + +Report any bugs or issues at [the SciMLTutorials repository](https://github.com/SciML/SciMLTutorials.jl/issues). + +### Inspecting Tutorial Results + +To see tutorial results before merging, click into the BuildKite, click onto +Artifacts, and then investigate the trained results. + +![](https://user-images.githubusercontent.com/1814174/118359358-02ddc980-b551-11eb-8a9b-24de947cefee.PNG) + +### Manually Generating Files + To run the generation process, do for example: ```julia -using Pkg, DiffEqTutorials -cd(joinpath(dirname(pathof(DiffEqTutorials)), "..")) -Pkg.pkg"activate ." -Pkg.pkg"instantiate" -DiffEqTutorials.weave_file("introduction","ode_introduction.jmd") +]activate SciMLTutorials # Get all of the packages +using SciMLTutorials +SciMLTutorials.weave_file(joinpath(pkgdir(SciMLTutorials),"tutorials","models"),"01-classical_physics.jmd") +``` + +To generate all of the files in a folder, for example, run: + +```julia +SciMLTutorials.weave_folder(joinpath(pkgdir(SciMLTutorials),"tutorials","models")) ``` To generate all of the notebooks, do: ```julia -DiffEqTutorials.weave_all() +SciMLTutorials.weave_all() ``` -If you add new tutorials which require new packages, simply updating your local -environment will change the project and manifest files. When this occurs, the -updated environment files should be included in the PR. +Each of the tuturials displays the computer characteristics at the bottom of +the benchmark. 
diff --git a/docs/Project.toml b/docs/Project.toml new file mode 100644 index 00000000..dfa65cd1 --- /dev/null +++ b/docs/Project.toml @@ -0,0 +1,2 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" diff --git a/docs/extrasrc/assets/favicon.ico b/docs/extrasrc/assets/favicon.ico new file mode 100644 index 00000000..3c6bd470 Binary files /dev/null and b/docs/extrasrc/assets/favicon.ico differ diff --git a/docs/extrasrc/assets/logo.png b/docs/extrasrc/assets/logo.png new file mode 100644 index 00000000..6f4c3e26 Binary files /dev/null and b/docs/extrasrc/assets/logo.png differ diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 00000000..19fec3c8 --- /dev/null +++ b/docs/make.jl @@ -0,0 +1,36 @@ +using Documenter, SciMLTutorialsOutput + +dir = @__DIR__() * "/.." + +@show dir +@show readdir(dir) + +include("pages.jl") + +mathengine = MathJax3(Dict(:loader => Dict("load" => ["[tex]/require", "[tex]/mathtools"]), + :tex => Dict("inlineMath" => [["\$", "\$"], ["\\(", "\\)"]], + "packages" => [ + "base", + "ams", + "autoload", + "mathtools", + "require" + ]))) + +makedocs( + sitename = "The SciML Tutorials", + authors = "Chris Rackauckas", + modules = [SciMLTutorialsOutput], + clean = true, doctest = false, + format = Documenter.HTML(#analytics = "UA-90474609-3", + assets = ["assets/favicon.ico"], + canonical = "https://tutorials.sciml.ai/stable/", + mathengine = mathengine), + pages = pages +) + +deploydocs(; + repo = "github.com/SciML/SciMLTutorialsOutput", + devbranch = "main", + branch = "main" +) diff --git a/docs/pages.jl b/docs/pages.jl new file mode 100644 index 00000000..e8f8df4e --- /dev/null +++ b/docs/pages.jl @@ -0,0 +1,50 @@ +# This file assumes `dir` is the directory for the package! dir = @__DIR__() * "/.." + +dir = @__DIR__() * "/.." 
+ +cp(joinpath(dir, "markdown"), joinpath(dir, "docs", "src"), force = true) +cp(joinpath(dir, "docs", "extrasrc", "assets"), joinpath(dir, "docs", "src", "assets"), force = true) +cp(joinpath(dir, "README.md"), joinpath(dir, "docs", "src", "index.md"), force = true) +tutorialsdir = joinpath(dir, "docs", "src") + +pages = Any["SciMLTutorials.jl: Tutorials for Scientific Machine Learning (SciML), Equation Solvers, and AI for Science" => "index.md"] + +for folder in readdir(tutorialsdir) + newpages = Any[] + if folder[(end - 2):end] != ".md" && folder != "Testing" && folder != "figures" && + folder != "assets" + for file in filter(x -> x[(end - 2):end] == ".md", readdir( + joinpath(tutorialsdir, folder))) + try + filecontents = readlines(joinpath(tutorialsdir, folder, file)) + title = filecontents[3][9:(end - 1)] + + # Cut out the first 5 lines from the file to remove the Weave header stuff + open(joinpath(tutorialsdir, folder, file), "w") do output + println(output, "# $title") + for line in Iterators.drop(filecontents, 4) + println(output, line) + end + end + push!(newpages, title => joinpath(folder, file)) + catch e + @show folder, file, e + end + end + push!(pages, folder => newpages) + end +end + +# The result is in alphabetical order, change to the wanted order + +permute!(pages, + [1] +) + +names = [ + "SciMLTutorials.jl: Tutorials for Scientific Machine Learning (SciML) and Equation Solvers" +] + +for i in 1:length(pages) + pages[i] = names[i] => pages[i][2] +end diff --git a/docs/src/markdown/blank.jl b/docs/src/markdown/blank.jl new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/docs/src/markdown/blank.jl @@ -0,0 +1 @@ + diff --git a/html/advanced/01-beeler_reuter.html b/html/advanced/01-beeler_reuter.html deleted file mode 100644 index 74bae197..00000000 --- a/html/advanced/01-beeler_reuter.html +++ /dev/null @@ -1,1461 +0,0 @@ - - - - - - An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model - - - - - - - - - - - - - - 
- - - -
-
-
- -
-

An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model

-
Shahriar Iravanian
- -
- -

Background

-

JuliaDiffEq is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). JuliaDiffEq provides a large number of explicit and implicit solvers suited for different types of ODE problems. It is possible to reduce a system of partial differential equations into an ODE problem by employing the method of lines (MOL). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. Solving Systems of Stochastic PDEs and using GPUs in Julia is a brief introduction to MOL and using GPUs to accelerate PDE solving in JuliaDiffEq. Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use CuArray and CUDAnative libraries to run the explicit part of the model on a GPU.

-

Note that this tutorial does not use the higher order IMEX methods built into DifferentialEquations.jl but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios.

-

There are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic Hodgkin-Huxley model and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE,

-

\[ -\partial V / \partial t = \nabla (D \nabla V) - \frac {I_\text{ion}} {C_m}, -\]

-

where $V$ is the transmembrane potential, $D$ is a diffusion tensor, $I_\text{ion}$ is the sum of the transmembrane currents and is calculated from the ODEs, and $C_m$ is the membrane capacitance and is usually assumed to be constant. Here we model a uniform and isotropic medium. Therefore, the model can be simplified to,

-

\[ -\partial V / \partial t = D \Delta{V} - \frac {I_\text{ion}} {C_m}, -\]

-

where $D$ is now a scalar. By nature, these models have to deal with different time scales and are therefore classified as stiff. Commonly, they are solved using the explicit Euler method, usually with a closed form for the integration of the gating variables (the Rush-Larsen method, see below). We can also solve these problems using implicit or semi-implicit PDE solvers (e.g., the Crank-Nicholson method combined with an iterative solver). Higher order explicit methods such as Runge-Kutta and linear multi-step methods cannot overcome the stiffness and are not particularly helpful.

-

In this tutorial, we first develop a CPU-only IMEX solver and then show how to move the explicit part to a GPU.

-

The Beeler-Reuter Model

-

We have chosen the Beeler-Reuter ventricular ionic model as our example. It is a classic model first described in 1977 and is used as a base for many other ionic models. It has eight state variables, which makes it complicated enough to be interesting without obscuring the main points of the exercise. The eight state variables are: the transmembrane potential ($V$), sodium-channel activation and inactivation gates ($m$ and $h$, similar to the Hodgkin-Huxley model), with an additional slow inactivation gate ($j$), calcium-channel activation and deactivations gates ($d$ and $f$), a time-dependent inward-rectifying potassium current gate ($x_1$), and intracellular calcium concentration ($c$). There are four currents: a sodium current ($i_{Na}$), a calcium current ($i_{Ca}$), and two potassium currents, one time-dependent ($i_{x_1}$) and one background time-independent ($i_{K_1}$).

-

CPU-Only Beeler-Reuter Solver

-

Let's start by developing a CPU only IMEX solver. The main idea is to use the DifferentialEquations framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. If no analytical approximation was known for the explicit part, one could use methods from this list.

-

First, we define the model constants:

- - -
-const v0 = -84.624
-const v1 = 10.0
-const C_K1 = 1.0f0
-const C_x1 = 1.0f0
-const C_Na = 1.0f0
-const C_s = 1.0f0
-const D_Ca = 0.0f0
-const D_Na = 0.0f0
-const g_s = 0.09f0
-const g_Na = 4.0f0
-const g_NaC = 0.005f0
-const ENa = 50.0f0 + D_Na
-const γ = 0.5f0
-const C_m = 1.0f0
-
- - -
-1.0f0
-
- - -

Note that the constants are defined as Float32 and not Float64. The reason is that most GPUs have many more single precision cores than double precision ones. To ensure uniformity between CPU and GPU, we also code most states variables as Float32 except for the transmembrane potential, which is solved by an implicit solver provided by the Sundial library and needs to be Float64.

-

The State Structure

-

Next, we define a struct to contain our state. BeelerReuterCpu is a functor and we will define a deriv function as its associated function.

- - -
-mutable struct BeelerReuterCpu <: Function
-    t::Float64              # the last timestep time to calculate Δt
-    diff_coef::Float64      # the diffusion-coefficient (coupling strength)
-
-    C::Array{Float32, 2}    # intracellular calcium concentration
-    M::Array{Float32, 2}    # sodium current activation gate (m)
-    H::Array{Float32, 2}    # sodium current inactivation gate (h)
-    J::Array{Float32, 2}    # sodium current slow inactivaiton gate (j)
-    D::Array{Float32, 2}    # calcium current activaiton gate (d)
-    F::Array{Float32, 2}    # calcium current inactivation gate (f)
-    XI::Array{Float32, 2}   # inward-rectifying potassium current (iK1)
-
-    Δu::Array{Float64, 2}   # place-holder for the Laplacian
-
-    function BeelerReuterCpu(u0, diff_coef)
-        self = new()
-
-        ny, nx = size(u0)
-        self.t = 0.0
-        self.diff_coef = diff_coef
-
-        self.C = fill(0.0001f0, (ny,nx))
-        self.M = fill(0.01f0, (ny,nx))
-        self.H = fill(0.988f0, (ny,nx))
-        self.J = fill(0.975f0, (ny,nx))
-        self.D = fill(0.003f0, (ny,nx))
-        self.F = fill(0.994f0, (ny,nx))
-        self.XI = fill(0.0001f0, (ny,nx))
-
-        self.Δu = zeros(ny,nx)
-
-        return self
-    end
-end
-
- - - -

Laplacian

-

The finite-difference Laplacian is calculated in-place by a 5-point stencil. The Neumann boundary condition is enforced. Note that we could have also used DiffEqOperators.jl to automate this step.

- - -
-# 5-point stencil
-function laplacian(Δu, u)
-    n1, n2 = size(u)
-
-    # internal nodes
-    for j = 2:n2-1
-        for i = 2:n1-1
-            @inbounds  Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j]
-        end
-    end
-
-    # left/right edges
-    for i = 2:n1-1
-        @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1]
-        @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2]
-    end
-
-    # top/bottom edges
-    for j = 2:n2-1
-        @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j]
-        @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j]
-    end
-
-    # corners
-    @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1]
-    @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1]
-    @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2]
-    @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2]
-end
-
- - -
-laplacian (generic function with 1 method)
-
- - -

The Rush-Larsen Method

-

We use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the IMEX solvers documentation. While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest.

-

The Rush-Larsen method replaces the explicit Euler integration for the gating variables with direct integration. The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs,

-

\[ -\frac{dg}{dt} = \alpha(V) (1 - g) - \beta(V) g -\]

-

where $g$ is a generic gating variable, ranging from 0 to 1, and $\alpha$ and $\beta$ are reaction rates. This equation can be written as,

-

\[ -\frac{dg}{dt} = (g_{\infty} - g) / \tau_g, -\]

-

where $g_\infty$ and $\tau_g$ are

-

\[ -g_{\infty} = \frac{\alpha}{(\alpha + \beta)}, -\]

-

and,

-

\[ -\tau_g = \frac{1}{(\alpha + \beta)}. -\]

-

Assuing that $g_\infty$ and $\tau_g$ are constant for the duration of a single time step ($\Delta{t}$), which is a reasonable assumption for most cardiac models, we can integrate directly to have,

-

\[ -g(t + \Delta{t}) = g_{\infty} - \left(g_{\infty} - g(\Delta{t})\right)\,e^{-\Delta{t}/\tau_g}. -\]

-

This is the Rush-Larsen technique. Note that as $\Delta{t} \rightarrow 0$, this equations morphs into the explicit Euler formula,

-

\[ -g(t + \Delta{t}) = g(t) + \Delta{t}\frac{dg}{dt}. -\]

-

rush_larsen is a helper function that use the Rush-Larsen method to integrate the gating variables.

- - -
-@inline function rush_larsen(g, α, β, Δt)
-    inf = α/(α+β)
-    τ = 1f0 / (α+β)
-    return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0)
-end
-
- - -
-rush_larsen (generic function with 1 method)
-
- - -

The gating variables are updated as below. The details of how to calculate $\alpha$ and $\beta$ are based on the Beeler-Reuter model and not of direct interest to this tutorial.

- - -
-function update_M_cpu(g, v, Δt)
-    # the condition is needed here to prevent NaN when v == 47.0
-    α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0)
-    β = (40.0f0 * exp(-0.056f0*(v+72.0f0)))
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_H_cpu(g, v, Δt)
-    α = 0.126f0 * exp(-0.25f0*(v+77.0f0))
-    β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0)
-   return rush_larsen(g, α, β, Δt)
-end
-
-function update_J_cpu(g, v, Δt)
-    α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0)
-    β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_D_cpu(g, v, Δt)
-    α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0)
-    β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_F_cpu(g, v, Δt)
-    α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0)
-    β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
-function update_XI_cpu(g, v, Δt)
-    α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0)
-    β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0)
-    return rush_larsen(g, α, β, Δt)
-end
-
- - -
-update_XI_cpu (generic function with 1 method)
-
- - -

The intracelleular calcium is not technically a gating variable, but we can use a similar explicit exponential integrator for it.

- - -
-function update_C_cpu(g, d, f, v, Δt)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g)
-    kCa = C_s * g_s * d * f
-    iCa = kCa * (v - ECa)
-    inf = 1.0f-7 * (0.07f0 - g)
-    τ = 1f0 / 0.07f0
-    return g + (g - inf) * expm1(-Δt/τ)
-end
-
- - -
-update_C_cpu (generic function with 1 method)
-
- - -

Implicit Solver

-

Now, it is time to define the derivative function as an associated function of BeelerReuterCpu. We plan to use the CVODE_BDF solver as our implicit portion. Similar to other iterative methods, it calls the deriv function with the same $t$ multiple times. For example, these are consecutive $t$s from a representative run:

-

0.86830 0.86830 0.85485 0.85485 0.85485 0.86359 0.86359 0.86359 0.87233 0.87233 0.87233 0.88598 ...

-

Here, every time step is called three times. We distinguish between two types of calls to the deriv function. When $t$ changes, the gating variables are updated by calling update_gates_cpu:

- - -
-function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt)
-    let Δt = Float32(Δt)
-        n1, n2 = size(u)
-        for j = 1:n2
-            for i = 1:n1
-                v = Float32(u[i,j])
-
-                XI[i,j] = update_XI_cpu(XI[i,j], v, Δt)
-                M[i,j] = update_M_cpu(M[i,j], v, Δt)
-                H[i,j] = update_H_cpu(H[i,j], v, Δt)
-                J[i,j] = update_J_cpu(J[i,j], v, Δt)
-                D[i,j] = update_D_cpu(D[i,j], v, Δt)
-                F[i,j] = update_F_cpu(F[i,j], v, Δt)
-
-                C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt)
-            end
-        end
-    end
-end
-
- - -
-update_gates_cpu (generic function with 1 method)
-
- - -

On the other hand, du is updated at each time step, since it is independent of $\Delta{t}$.

- - -
-# iK1 is the inward-rectifying potassium current
-function calc_iK1(v)
-    ea = exp(0.04f0*(v+85f0))
-    eb = exp(0.08f0*(v+53f0))
-    ec = exp(0.04f0*(v+53f0))
-    ed = exp(-0.04f0*(v+23f0))
-    return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)
-            + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))
-end
-
-# ix1 is the time-independent background potassium current
-function calc_ix1(v, xi)
-    ea = exp(0.04f0*(v+77f0))
-    eb = exp(0.04f0*(v+35f0))
-    return xi * 0.8f0 * (ea-1f0) / eb
-end
-
-# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)
-function calc_iNa(v, m, h, j)
-    return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)
-end
-
-# iCa is the calcium current
-function calc_iCa(v, d, f, c)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c)    # ECa is the calcium reversal potential
-    return C_s * g_s * d * f * (v - ECa)
-end
-
-function update_du_cpu(du, u, XI, M, H, J, D, F, C)
-    n1, n2 = size(u)
-
-    for j = 1:n2
-        for i = 1:n1
-            v = Float32(u[i,j])
-
-            # calculating individual currents
-            iK1 = calc_iK1(v)
-            ix1 = calc_ix1(v, XI[i,j])
-            iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])
-            iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])
-
-            # total current
-            I_sum = iK1 + ix1 + iNa + iCa
-
-            # the reaction part of the reaction-diffusion equation
-            du[i,j] = -I_sum / C_m
-        end
-    end
-end
-
- - -
-update_du_cpu (generic function with 1 method)
-
- - -

Finally, we put everything together is our deriv function, which is a call on BeelerReuterCpu.

- - -
-function (f::BeelerReuterCpu)(du, u, p, t)
-    Δt = t - f.t
-
-    if Δt != 0 || t == 0
-        update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt)
-        f.t = t
-    end
-
-    laplacian(f.Δu, u)
-
-    # calculate the reaction portion
-    update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C)
-
-    # ...add the diffusion portion
-    du .+= f.diff_coef .* f.Δu
-end
-
- - - -

Results

-

Time to test! We need to define the starting transmembrane potential with the help of global constants v0 and v1, which represent the resting and activated potentials.

- - -
-const N = 192;
-u0 = fill(v0, (N, N));
-u0[90:102,90:102] .= v1;   # a small square in the middle of the domain
-
- - - -

The initial condition is a small square in the middle of the domain.

- - -
-using Plots
-heatmap(u0)
-
- - - - -

Next, the problem is defined:

- - -
-using DifferentialEquations, Sundials
-
-deriv_cpu = BeelerReuterCpu(u0, 1.0);
-prob = ODEProblem(deriv_cpu, u0, (0.0, 50.0));
-
- - - -

For stiff reaction-diffusion equations, CVODE_BDF from Sundial library is an excellent solver.

- - -
-@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);
-
- - -
-38.822737 seconds (2.66 M allocations: 138.277 MiB, 0.28% gc time)
-
- - - -
-heatmap(sol.u[end])
-
- - - - -

CPU/GPU Beeler-Reuter Solver

-

GPUs are great for embarrassingly parallel problems but not so much for highly coupled models. We plan to keep the implicit part on CPU and run the decoupled explicit code on a GPU with the help of the CUDAnative library.

-

GPUs and CUDA

-

It this section, we present a brief summary of how GPUs (specifically NVIDIA GPUs) work and how to program them using the Julia CUDA interface. The readers who are familiar with these basic concepts may skip this section.

-

Let's start by looking at the hardware of a typical high-end GPU, GTX 1080. It has four Graphics Processing Clusters (equivalent to a discrete CPU), each harboring five Streaming Multiprocessor (similar to a CPU core). Each SM has 128 single-precision CUDA cores. Therefore, GTX 1080 has a total of 4 x 5 x 128 = 2560 CUDA cores. The maximum theoretical throughput for a GTX 1080 is reported as 8.87 TFLOPS. This figure is calculated for a boost clock frequency of 1.733 MHz as 2 x 2560 x 1.733 MHz = 8.87 TFLOPS. The factor 2 is included because two single floating point operations, a multiplication and an addition, can be done in a clock cycle as part of a fused-multiply-addition FMA operation. GTX 1080 also has 8192 MB of global memory accessible to all the cores (in addition to local and shared memory on each SM).

-

A typical CUDA application has the following flow:

-
    -
  1. Define and initialize the problem domain tensors (multi-dimensional arrays) in CPU memory.

    -
  2. -
  3. Allocate corresponding tensors in the GPU global memory.

    -
  4. -
  5. Transfer the input tensors from CPU to the corresponding GPU tensors.

    -
  6. -
  7. Invoke CUDA kernels (i.e., the GPU functions callable from CPU) that operate on the GPU tensors.

    -
  8. -
  9. Transfer the result tensors from GPU back to CPU.

    -
  10. -
  11. Process tensors on CPU.

    -
  12. -
  13. Repeat steps 3-6 as needed.

    -
  14. -
-

Some libraries, such as ArrayFire, hide the complexicities of steps 2-5 behind a higher level of abstraction. However, here we take a lower level route. By using CuArray and CUDAnative, we achieve a finer-grained control and higher performance. In return, we need to implement each step manually.

-

CuArray is a thin abstraction layer over the CUDA API and allows us to define GPU-side tensors and copy data to and from them but does not provide for operations on tensors. CUDAnative is a compiler that translates Julia functions designated as CUDA kernels into ptx (a high-level CUDA assembly language).

-

The CUDA Code

-

The key to fast CUDA programs is to minimize CPU/GPU memory transfers and global memory accesses. The implicit solver is currently CPU only, but it only needs access to the transmembrane potential. The rest of state variables reside on the GPU memory.

-

We modify $BeelerReuterCpu$ into $BeelerReuterGpu$ by defining the state variables as CuArrays instead of standard Julia Arrays. The name of each variable defined on GPU is prefixed by d_ for clarity. Note that $\Delta{v}$ is a temporary storage for the Laplacian and stays on the CPU side.

- - -
-using CUDAnative, CuArrays
-
-mutable struct BeelerReuterGpu <: Function
-    t::Float64                  # the last timestep time to calculate Δt
-    diff_coef::Float64          # the diffusion-coefficient (coupling strength)
-
-    d_C::CuArray{Float32, 2}    # intracellular calcium concentration
-    d_M::CuArray{Float32, 2}    # sodium current activation gate (m)
-    d_H::CuArray{Float32, 2}    # sodium current inactivation gate (h)
-    d_J::CuArray{Float32, 2}    # sodium current slow inactivaiton gate (j)
-    d_D::CuArray{Float32, 2}    # calcium current activaiton gate (d)
-    d_F::CuArray{Float32, 2}    # calcium current inactivation gate (f)
-    d_XI::CuArray{Float32, 2}   # inward-rectifying potassium current (iK1)
-
-    d_u::CuArray{Float64, 2}    # place-holder for u in the device memory
-    d_du::CuArray{Float64, 2}   # place-holder for d_u in the device memory
-
-    Δv::Array{Float64, 2}       # place-holder for voltage gradient
-
-    function BeelerReuterGpu(u0, diff_coef)
-        self = new()
-
-        ny, nx = size(u0)
-        @assert (nx % 16 == 0) && (ny % 16 == 0)
-        self.t = 0.0
-        self.diff_coef = diff_coef
-
-        self.d_C = CuArray(fill(0.0001f0, (ny,nx)))
-        self.d_M = CuArray(fill(0.01f0, (ny,nx)))
-        self.d_H = CuArray(fill(0.988f0, (ny,nx)))
-        self.d_J = CuArray(fill(0.975f0, (ny,nx)))
-        self.d_D = CuArray(fill(0.003f0, (ny,nx)))
-        self.d_F = CuArray(fill(0.994f0, (ny,nx)))
-        self.d_XI = CuArray(fill(0.0001f0, (ny,nx)))
-
-        self.d_u = CuArray(u0)
-        self.d_du = CuArray(zeros(ny,nx))
-
-        self.Δv = zeros(ny,nx)
-
-        return self
-    end
-end
-
- - - -

The Laplacian function remains unchanged. The main change to the explicit gating solvers is that exp and expm1 functions are prefixed by CUDAnative.. This is a technical nuisance that will hopefully be resolved in future.

- - -
-function rush_larsen_gpu(g, α, β, Δt)
-    inf = α/(α+β)
-    τ = 1.0/(α+β)
-    return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0)
-end
-
-function update_M_gpu(g, v, Δt)
-    # the condition is needed here to prevent NaN when v == 47.0
-    α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0)
-    β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0)))
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_H_gpu(g, v, Δt)
-    α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0))
-    β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_J_gpu(g, v, Δt)
-    α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0)
-    β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_D_gpu(g, v, Δt)
-    α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0)
-    β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_F_gpu(g, v, Δt)
-    α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0)
-    β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_XI_gpu(g, v, Δt)
-    α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0)
-    β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0)
-    return rush_larsen_gpu(g, α, β, Δt)
-end
-
-function update_C_gpu(c, d, f, v, Δt)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c)
-    kCa = C_s * g_s * d * f
-    iCa = kCa * (v - ECa)
-    inf = 1.0f-7 * (0.07f0 - c)
-    τ = 1f0 / 0.07f0
-    return c + (c - inf) * CUDAnative.expm1(-Δt/τ)
-end
-
- - -
-update_C_gpu (generic function with 1 method)
-
- - -

Similarly, we modify the functions to calculate the individual currents by adding CUDAnative prefix.

- - -
-# iK1 is the inward-rectifying potassium current
-function calc_iK1(v)
-    ea = CUDAnative.exp(0.04f0*(v+85f0))
-    eb = CUDAnative.exp(0.08f0*(v+53f0))
-    ec = CUDAnative.exp(0.04f0*(v+53f0))
-    ed = CUDAnative.exp(-0.04f0*(v+23f0))
-    return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)
-            + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))
-end
-
-# ix1 is the time-independent background potassium current
-function calc_ix1(v, xi)
-    ea = CUDAnative.exp(0.04f0*(v+77f0))
-    eb = CUDAnative.exp(0.04f0*(v+35f0))
-    return xi * 0.8f0 * (ea-1f0) / eb
-end
-
-# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)
-function calc_iNa(v, m, h, j)
-    return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)
-end
-
-# iCa is the calcium current
-function calc_iCa(v, d, f, c)
-    ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c)    # ECa is the calcium reversal potential
-    return C_s * g_s * d * f * (v - ECa)
-end
-
- - -
-calc_iCa (generic function with 1 method)
-
- - -

CUDA Kernels

-

A CUDA program does not directly deal with GPCs and SMs. The logical view of a CUDA program is in the term of blocks and threads. We have to specify the number of block and threads when running a CUDA kernel. Each thread runs on a single CUDA core. Threads are logically bundled into blocks, which are in turn specified on a grid. The grid stands for the entirety of the domain of interest.

-

Each thread can find its logical coordinate by using few pre-defined indexing variables (threadIdx, blockIdx, blockDim and gridDim) in C/C++ and the corresponding functions (e.g., threadIdx()) in Julia. There variables and functions are defined automatically for each thread and may return a different value depending on the calling thread. The return value of these functions is a 1, 2, or 3 dimensional structure whose elements can be accessed as .x, .y, and .z (for a 1-dimensional case, .x reports the actual index and .y and .z simply return 1). For example, if we deploy a kernel in 128 blocks and with 256 threads per block, each thread will see

-
    gridDim.x = 128;
-    blockDim=256;
-

while blockIdx.x ranges from 0 to 127 in C/C++ and 1 to 128 in Julia. Similarly, threadIdx.x will be between 0 to 255 in C/C++ (of course, in Julia the range will be 1 to 256).

-

A C/C++ thread can calculate its index as

-
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
-

In Julia, we have to take into account base 1. Therefore, we use the following formula

-
    idx = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x
-

A CUDA programmer is free to interpret the calculated index however it fits the application, but in practice, it is usually interpreted as an index into input tensors.

-

In the GPU version of the solver, each thread works on a single element of the medium, indexed by a (x,y) pair. update_gates_gpu and update_du_gpu are very similar to their CPU counterparts but are in fact CUDA kernels where the for loops are replaced with CUDA specific indexing. Note that CUDA kernels cannot return a valve; hence, nothing at the end.

- - -
-function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt)
-    i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x
-    j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y
-
-    v = Float32(u[i,j])
-
-    let Δt = Float32(Δt)
-        XI[i,j] = update_XI_gpu(XI[i,j], v, Δt)
-        M[i,j] = update_M_gpu(M[i,j], v, Δt)
-        H[i,j] = update_H_gpu(H[i,j], v, Δt)
-        J[i,j] = update_J_gpu(J[i,j], v, Δt)
-        D[i,j] = update_D_gpu(D[i,j], v, Δt)
-        F[i,j] = update_F_gpu(F[i,j], v, Δt)
-
-        C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt)
-    end
-    nothing
-end
-
-function update_du_gpu(du, u, XI, M, H, J, D, F, C)
-    i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x
-    j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y
-
-    v = Float32(u[i,j])
-
-    # calculating individual currents
-    iK1 = calc_iK1(v)
-    ix1 = calc_ix1(v, XI[i,j])
-    iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])
-    iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])
-
-    # total current
-    I_sum = iK1 + ix1 + iNa + iCa
-
-    # the reaction part of the reaction-diffusion equation
-    du[i,j] = -I_sum / C_m
-    nothing
-end
-
- - -
-update_du_gpu (generic function with 1 method)
-
- - -

Implicit Solver

-

Finally, the deriv function is modified to copy u to GPU and copy du back and to invoke CUDA kernels.

- - -
-function (f::BeelerReuterGpu)(du, u, p, t)
-    L = 16   # block size
-    Δt = t - f.t
-    copyto!(f.d_u, u)
-    ny, nx = size(u)
-
-    if Δt != 0 || t == 0
-        @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu(
-            f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt)
-        f.t = t
-    end
-
-    laplacian(f.Δv, u)
-
-    # calculate the reaction portion
-    @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu(
-        f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C)
-
-    copyto!(du, f.d_du)
-
-    # ...add the diffusion portion
-    du .+= f.diff_coef .* f.Δv
-end
-
- - - -

Ready to test!

- - -
-using DifferentialEquations, Sundials
-
-deriv_gpu = BeelerReuterGpu(u0, 1.0);
-prob = ODEProblem(deriv_gpu, u0, (0.0, 50.0));
-@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);
-
- - -
-9.548554 seconds (4.44 M allocations: 1.763 GiB, 7.35% gc time)
-
- - - -
-heatmap(sol.u[end])
-
- - - - -

Summary

-

We achieve around a 6x speedup with running the explicit portion of our IMEX solver on a GPU. The major bottleneck of this technique is the communication between CPU and GPU. In its current form, not all of the internals of the method utilize GPU acceleration. In particular, the implicit equations solved by GMRES are performed on the CPU. This partial CPU nature also increases the amount of data transfer that is required between the GPU and CPU (performed every f call). Compiling the full ODE solver to the GPU would solve both of these issues and potentially give a much larger speedup. JuliaDiffEq developers are currently working on solutions to alleviate these issues, but these will only be compatible with native Julia solvers (and not Sundials).

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("advanced","01-beeler_reuter.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/advanced/02-advanced_ODE_solving.html b/html/advanced/02-advanced_ODE_solving.html deleted file mode 100644 index 5e86d7f5..00000000 --- a/html/advanced/02-advanced_ODE_solving.html +++ /dev/null @@ -1,1614 +0,0 @@ - - - - - - Solving Stiff Equations - - - - - - - - - - - - - - - - - -
-
-
- -
-

Solving Stiff Equations

-
Chris Rackauckas
- -
- -

This tutorial is for getting into the extra features for solving stiff ordinary differential equations in an efficient manner. Solving stiff ordinary differential equations requires specializing the linear solver on properties of the Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2) back-solves. Note that these same functions and controls also extend to stiff SDEs, DDEs, DAEs, etc.

-

Code Optimization for Differential Equations

-

Writing Efficient Code

-

For a detailed tutorial on how to optimize one's DifferentialEquations.jl code, please see the Optimizing DiffEq Code tutorial.

-

Choosing a Good Solver

-

Choosing a good solver is required for getting top notch speed. General recommendations can be found on the solver page (for example, the ODE Solver Recommendations). The current recommendations can be simplified to a Rosenbrock method (Rosenbrock23 or Rodas5) for smaller (<50 ODEs) problems, ESDIRK methods for slightly larger (TRBDF2 or KenCarp4 for <2000 ODEs), and Sundials CVODE_BDF for even larger problems. lsoda from LSODA.jl is generally worth a try.

-

More details on the solver to choose can be found by benchmarking. See the DiffEqBenchmarks to compare many solvers on many problems.

-

Check Out the Speed FAQ

-

See this FAQ for information on common pitfalls and how to improve performance.

-

Setting Up Your Julia Installation for Speed

-

Julia uses an underlying BLAS implementation for its matrix multiplications and factorizations. This library is automatically multithreaded and accelerates the internal linear algebra of DifferentialEquations.jl. However, for optimality, you should make sure that the number of BLAS threads that you are using matches the number of physical cores and not the number of logical cores. See this issue for more details.

-

To check the number of BLAS threads, use:

- - -
-ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ())
-
- - -
-8
-
- - -

If I want to set this directly to 4 threads, I would use:

- - -
-using LinearAlgebra
-LinearAlgebra.BLAS.set_num_threads(4)
-
- - - -

Additionally, in some cases Intel's MKL might be a faster BLAS than the standard BLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you can use MKL.jl which will accelerate the linear algebra routines. Please see the package for the limitations.

-

Use Accelerator Hardware

-

When possible, use GPUs. If your ODE system is small and you need to solve it with very many different parameters, see the ensembles interface and DiffEqGPU.jl. If your problem is large, consider using a CuArray for the state to allow for GPU-parallelism of the internal linear algebra.

-

Speeding Up Jacobian Calculations

-

When one is using an implicit or semi-implicit differential equation solver, the Jacobian must be built at many iterations and this can be one of the most expensive steps. There are two pieces that must be optimized in order to reach maximal efficiency when solving stiff equations: the sparsity pattern and the construction of the Jacobian. The construction is filling the matrix J with values, while the sparsity pattern determines the structure of J to use.

-

The sparsity pattern is given by a prototype matrix, the jac_prototype, which will be copied to be used as J. The default is for J to be a Matrix, i.e. a dense matrix. However, if you know the sparsity of your problem, then you can pass a different matrix type. For example, a SparseMatrixCSC will give a sparse matrix. Additionally, structured matrix types like Tridiagonal, BandedMatrix (from BandedMatrices.jl), BlockBandedMatrix (from BlockBandedMatrices.jl), and more can be given. DifferentialEquations.jl will internally use this matrix type, making the factorizations faster by utilizing the specialized forms.

-

For the construction, there are 3 ways to fill J:

-
    -
  • The default, which uses normal finite/automatic differentiation

    -
  • -
  • A function jac(J,u,p,t) which directly computes the values of J

    -
  • -
  • A colorvec which defines a sparse differentiation scheme.

    -
  • -
-

We will now showcase how to make use of this functionality with growing complexity.

-

Declaring Jacobian Functions

-

Let's solve the Rosenbrock equations:

-

\[ -\begin{align} -dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\ -dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ -dy_3 &= 3*10^7 y_{2}^2 \\ -\end{align} -\]

-

In order to reduce the Jacobian construction cost, one can describe a Jacobian function by using the jac argument for the ODEFunction. First, let's do a standard ODEProblem:

- - -
-using DifferentialEquations
-function rober(du,u,p,t)
-  y₁,y₂,y₃ = u
-  k₁,k₂,k₃ = p
-  du[1] = -k₁*y₁+k₃*y₂*y₃
-  du[2] =  k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
-  du[3] =  k₂*y₂^2
-  nothing
-end
-prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
-sol = solve(prob,Rosenbrock23())
-
-using Plots
-plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))
-
- - - - - -
-using BenchmarkTools
-@btime solve(prob)
-
- - -
-409.600 μs (3063 allocations: 161.83 KiB)
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 115-element Array{Float64,1}:
-      0.0                  
-      0.0014148468219250373
-      0.0020449182545311173
-      0.0031082402716566307
-      0.004077787050059496 
-      0.005515332443361059 
-      0.007190040962774541 
-      0.009125372578778032 
-      0.011053912492732977 
-      0.012779077276958607 
-      ⋮                    
-  47335.56357690261        
-  52732.01292853374        
-  58693.72991412389        
-  65278.000210850696       
-  72548.20206513454        
-  80574.5643369749         
-  89435.05301092885        
-  99216.41264599326        
- 100000.0                  
-u: 115-element Array{Array{Float64,1},1}:
- [1.0, 0.0, 0.0]                                                    
- [0.9999434113193613, 3.283958829839966e-5, 2.3749092340286502e-5]  
- [0.9999182177783585, 3.55426801363446e-5, 4.6239541505020656e-5]   
- [0.999875715036629, 3.6302469334849744e-5, 8.798249403609506e-5]   
- [0.9998369766077329, 3.646280308115459e-5, 0.00012656058918590176] 
- [0.9997795672444667, 3.646643085642237e-5, 0.0001839663246768369]  
- [0.9997127287139348, 3.6447279992896e-5, 0.00025082400607228316]   
- [0.9996355450022019, 3.6366816179962866e-5, 0.00032808818161818775]
- [0.9995586925734838, 3.6018927453312764e-5, 0.00040528849906290045]
- [0.9994899965196854, 3.468694637786026e-5, 0.000475316533936808]   
- ⋮                                                                  
- [0.03394368168613229, 1.404798439362035e-7, 0.9660561778340258]    
- [0.031028975539652698, 1.280360743781007e-7, 0.9689708964242754]   
- [0.02835436357223889, 1.1668209524677941e-7, 0.9716455197456683]   
- [0.025901326001934923, 1.0632276689411095e-7, 0.9740985676753005]  
- [0.023652545345805354, 9.687112514942483e-8, 0.9763473577830714]   
- [0.021591862129552664, 8.824767963573306e-8, 0.9784080496227692]   
- [0.019704225538717677, 8.037977048382674e-8, 0.9802956940815135]   
- [0.017975641463053707, 7.320098240041474e-8, 0.9820242853359655]   
- [0.017850566233695766, 7.268384360678819e-8, 0.9821493610824623]
-
- - -

Now we want to add the Jacobian. First we have to derive the Jacobian $\frac{df_i}{du_j}$ which is J[i,j]. From this we get:

- - -
-function rober_jac(J,u,p,t)
-  y₁,y₂,y₃ = u
-  k₁,k₂,k₃ = p
-  J[1,1] = k₁ * -1
-  J[2,1] = k₁
-  J[3,1] = 0
-  J[1,2] = y₃ * k₃
-  J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1
-  J[3,2] = y₂ * 2 * k₂
-  J[1,3] = k₃ * y₂
-  J[2,3] = k₃ * y₂ * -1
-  J[3,3] = 0
-  nothing
-end
-f = ODEFunction(rober, jac=rober_jac)
-prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
-
-@btime solve(prob_jac)
-
- - -
-317.900 μs (2599 allocations: 153.11 KiB)
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 115-element Array{Float64,1}:
-      0.0                  
-      0.0014148468219250373
-      0.0020449182545311173
-      0.0031082402716566307
-      0.004077787050059496 
-      0.005515332443361059 
-      0.007190040962774541 
-      0.009125372578778032 
-      0.011053912492732977 
-      0.012779077276958607 
-      ⋮                    
-  45964.060340548356       
-  51219.40381376205        
-  57025.01899700374        
-  63436.021374561584       
-  70513.1073617524         
-  78323.14229130604        
-  86939.82338876331        
-  96444.41085674686        
- 100000.0                  
-u: 115-element Array{Array{Float64,1},1}:
- [1.0, 0.0, 0.0]                                                    
- [0.9999434113193613, 3.283958829839966e-5, 2.3749092340286502e-5]  
- [0.9999182177783585, 3.55426801363446e-5, 4.6239541505020656e-5]   
- [0.999875715036629, 3.6302469334849744e-5, 8.798249403609506e-5]   
- [0.9998369766077329, 3.646280308115459e-5, 0.00012656058918590176] 
- [0.9997795672444667, 3.646643085642237e-5, 0.0001839663246768369]  
- [0.9997127287139348, 3.6447279992896e-5, 0.00025082400607228316]   
- [0.9996355450022019, 3.6366816179962866e-5, 0.00032808818161818775]
- [0.9995586925734838, 3.6018927453312764e-5, 0.00040528849906290045]
- [0.9994899965196854, 3.468694637786026e-5, 0.000475316533936808]   
- ⋮                                                                  
- [0.03478048133177493, 1.4406682005231008e-7, 0.9652193746014031]   
- [0.03179591062189176, 1.313038656880417e-7, 0.9682039580742408]    
- [0.029057356622057315, 1.1966100432939363e-7, 0.9709425237169371]  
- [0.02654597011713668, 1.0904070990251299e-7, 0.9734539208421517]   
- [0.024244118287194777, 9.935385522693504e-8, 0.9757557823589477]   
- [0.022135344621501105, 9.05190025093182e-8, 0.9778645648594945]    
- [0.02020432071854, 8.246174295748071e-8, 0.9797955968197154]       
- [0.018436796681356796, 7.511410189106845e-8, 0.9815631282045397]   
- [0.01785426048218692, 7.269900678199638e-8, 0.9821456668188047]
-
- - -

Automatic Derivation of Jacobian Functions

-

But that was hard! If you want to take the symbolic Jacobian of numerical code, we can make use of ModelingToolkit.jl to symbolicify the numerical code and do the symbolic calculation and return the Julia code for this.

- - -
-using ModelingToolkit
-de = modelingtoolkitize(prob)
-ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place
-
- - -
-:((##MTIIPVar#392, u, p, t)->begin
-          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils
-.jl:65 =#
-          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils
-.jl:66 =#
-          let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3
-])
-              ##MTIIPVar#392[1] = α₁ * -1
-              ##MTIIPVar#392[2] = α₁
-              ##MTIIPVar#392[3] = 0
-              ##MTIIPVar#392[4] = x₃ * α₃
-              ##MTIIPVar#392[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1
-              ##MTIIPVar#392[6] = x₂ * 2 * α₂
-              ##MTIIPVar#392[7] = α₃ * x₂
-              ##MTIIPVar#392[8] = α₃ * x₂ * -1
-              ##MTIIPVar#392[9] = 0
-          end
-          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils
-.jl:67 =#
-          nothing
-      end)
-
- - -

which outputs:

- - - -
-:((##MTIIPVar#376, u, p, t)->begin
-          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =#
-          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =#
-          let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3])
-              ##MTIIPVar#376[1] = α₁ * -1
-              ##MTIIPVar#376[2] = α₁
-              ##MTIIPVar#376[3] = 0
-              ##MTIIPVar#376[4] = x₃ * α₃
-              ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1
-              ##MTIIPVar#376[6] = x₂ * 2 * α₂
-              ##MTIIPVar#376[7] = α₃ * x₂
-              ##MTIIPVar#376[8] = α₃ * x₂ * -1
-              ##MTIIPVar#376[9] = 0
-          end
-          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =#
-          nothing
-      end)
-
- - -

Now let's use that to give the analytical solution Jacobian:

- - -
-jac = eval(ModelingToolkit.generate_jacobian(de...)[2])
-f = ODEFunction(rober, jac=jac)
-prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100000.0)
-u0: [1.0, 0.0, 0.0]
-
- - -

Declaring a Sparse Jacobian

-

Jacobian sparsity is declared by the jac_prototype argument in the ODEFunction. Note that you should only do this if the sparsity is high, for example, 0.1% of the matrix is non-zeros, otherwise the overhead of sparse matrices can be higher than the gains from sparse differentiation!

-

But as a demonstration, let's build a sparse matrix for the Rober problem. We can do this by gathering the I and J pairs for the non-zero components, like:

- - -
-I = [1,2,1,2,3,1,2]
-J = [1,1,2,2,2,3,3]
-using SparseArrays
-jac_prototype = sparse(I,J,1.0)
-
- - -
-3×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 7 stored entries:
-  [1, 1]  =  1.0
-  [2, 1]  =  1.0
-  [1, 2]  =  1.0
-  [2, 2]  =  1.0
-  [3, 2]  =  1.0
-  [1, 3]  =  1.0
-  [2, 3]  =  1.0
-
- - -

Now this is the sparse matrix prototype that we want to use in our solver, which we then pass like:

- - -
-f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype)
-prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100000.0)
-u0: [1.0, 0.0, 0.0]
-
- - -

Automatic Sparsity Detection

-

One of the useful companion tools for DifferentialEquations.jl is SparsityDetection.jl. This allows for automatic declaration of Jacobian sparsity types. To see this in action, let's look at the 2-dimensional Brusselator equation:

- - -
-const N = 32
-const xyd_brusselator = range(0,stop=1,length=N)
-brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5.
-limit(a, N) = a == N+1 ? 1 : a == 0 ? N : a
-function brusselator_2d_loop(du, u, p, t)
-  A, B, alpha, dx = p
-  alpha = alpha/dx^2
-  @inbounds for I in CartesianIndices((N, N))
-    i, j = Tuple(I)
-    x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]]
-    ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N)
-    du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) +
-                B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t)
-    du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) +
-                A*u[i,j,1] - u[i,j,1]^2*u[i,j,2]
-    end
-end
-p = (3.4, 1., 10., step(xyd_brusselator))
-
- - -
-(3.4, 1.0, 10.0, 0.03225806451612903)
-
- - -

Given this setup, we can give an example input and output and call sparsity! on our function with the example arguments and it will kick out a sparse matrix with our pattern, which we can turn into our jac_prototype.

- - -
-using SparsityDetection, SparseArrays
-input = rand(32,32,2)
-output = similar(input)
-sparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0)
-
- - -
-Explored path: SparsityDetection.Path(Bool[], 1)
-
- - - -
-jac_sparsity = Float64.(sparse(sparsity_pattern))
-
- - -
-2048×2048 SparseArrays.SparseMatrixCSC{Float64,Int64} with 12288 stored ent
-ries:
-  [1   ,    1]  =  1.0
-  [2   ,    1]  =  1.0
-  [32  ,    1]  =  1.0
-  [33  ,    1]  =  1.0
-  [993 ,    1]  =  1.0
-  [1025,    1]  =  1.0
-  [1   ,    2]  =  1.0
-  [2   ,    2]  =  1.0
-  [3   ,    2]  =  1.0
-  ⋮
-  [2015, 2047]  =  1.0
-  [2046, 2047]  =  1.0
-  [2047, 2047]  =  1.0
-  [2048, 2047]  =  1.0
-  [1024, 2048]  =  1.0
-  [1056, 2048]  =  1.0
-  [2016, 2048]  =  1.0
-  [2017, 2048]  =  1.0
-  [2047, 2048]  =  1.0
-  [2048, 2048]  =  1.0
-
- - -

Let's double check what our sparsity pattern looks like:

- - -
-using Plots
-spy(jac_sparsity,markersize=1,colorbar=false,color=:deep)
-
- - - - -

That's neat, and would be tedious to build by hand! Now we just pass it to the ODEFunction as before:

- - -
-f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity)
-
- - -
-(::DiffEqBase.ODEFunction{true,typeof(Main.WeaveSandBox0.brusselator_2d_loo
-p),LinearAlgebra.UniformScaling{Bool},Nothing,Nothing,Nothing,SparseArrays.
-SparseMatrixCSC{Float64,Int64},Nothing,Nothing,Nothing,Nothing,Nothing}) (g
-eneric function with 7 methods)
-
- - -

Build the ODEProblem:

- - -
-function init_brusselator_2d(xyd)
-  N = length(xyd)
-  u = zeros(N, N, 2)
-  for I in CartesianIndices((N, N))
-    x = xyd[I[1]]
-    y = xyd[I[2]]
-    u[I,1] = 22*(y*(1-y))^(3/2)
-    u[I,2] = 27*(x*(1-x))^(3/2)
-  end
-  u
-end
-u0 = init_brusselator_2d(xyd_brusselator)
-prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop,
-                                     u0,(0.,11.5),p)
-
-prob_ode_brusselator_2d_sparse = ODEProblem(f,
-                                     u0,(0.,11.5),p)
-
- - -
-ODEProblem with uType Array{Float64,3} and tType Float64. In-place: true
-timespan: (0.0, 11.5)
-u0: [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715
-876 … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371
-586 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]
-
- - -

Now let's see how the version with sparsity compares to the version without:

- - -
-@btime solve(prob_ode_brusselator_2d,save_everystep=false)
-
- - -
-43.298 s (7317 allocations: 70.12 MiB)
-
- - - -
-@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)
-
- - -
-23.900 s (367199 allocations: 896.99 MiB)
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
-  0.0
- 11.5
-u: 2-element Array{Array{Float64,3},1}:
- [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
- … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
- 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
-                                                                           
-                                                                           
-                                                 
- [3.2183315970074036 3.2183043434767553 … 3.2184226343677738 3.218371247341
-7185; 3.2183804713733872 3.2183499447177057 … 3.2184831183646856 3.21842504
-7282479; … ; 3.218246108233481 3.2182241729222354 … 3.2183185170391946 3.21
-82778079052787; 3.2182863194790094 3.218261945024488 … 3.218367227674788 3.
-218321653767132]
-
-[2.364108254063361 2.364109732940303 … 2.364103502720394 2.3641061660225517
-; 2.364105345047017 2.3641069231419443 … 2.3641002347797833 2.3641031002634
-882; … ; 2.364113451334332 2.3641147252834216 … 2.364109297958111 2.3641116
-159339757; 2.3641109923384915 2.364112358364487 … 2.3641065653101885 2.3641
-090439583214]
-
- - -

Declaring Color Vectors for Fast Construction

-

If you cannot directly define a Jacobian function, you can use the colorvec to speed up the Jacobian construction. What the colorvec does is allow for calculating multiple columns of a Jacobian simultaneously by using the sparsity pattern. An explanation of matrix coloring can be found in the MIT 18.337 Lecture Notes.

-

To perform general matrix coloring, we can use SparseDiffTools.jl. For example, for the Brusselator equation:

- - -
-using SparseDiffTools
-colorvec = matrix_colors(jac_sparsity)
-@show maximum(colorvec)
-
- - -
-maximum(colorvec) = 12
-12
-
- - -

This means that we can now calculate the Jacobian in 12 function calls. This is a nice reduction from 2048 using only automated tooling! To now make use of this inside of the ODE solver, you simply need to declare the colorvec:

- - -
-f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity,
-                                    colorvec=colorvec)
-prob_ode_brusselator_2d_sparse = ODEProblem(f,
-                                     init_brusselator_2d(xyd_brusselator),
-                                     (0.,11.5),p)
-@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)
-
- - -
-5.184 s (19039 allocations: 881.07 MiB)
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
-  0.0
- 11.5
-u: 2-element Array{Array{Float64,3},1}:
- [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
- … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
- 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
-                                                                           
-                                                                           
-                                                   
- [3.2183373918177796 3.2183101409241526 … 3.2184284167956267 3.218377034566
-1604; 3.2183862623036537 3.218355740108313 … 3.218488896905827 3.2184308308
-09056; … ; 3.218251904608678 3.2182299624517134 … 3.2183243097118095 3.2182
-835995190024; 3.2182921103674285 3.218267738694001 … 3.2183730157748163 3.2
-183274412034346]
-
-[2.3641011711912463 2.364102627665652 … 2.364096424152248 2.364099082779794
-5; 2.3640982676790627 2.3640998304296703 … 2.3640931617281944 2.36409602465
-74303; … ; 2.364106344376436 2.3641076180295504 … 2.364102206048339 2.36410
-45205022344; 2.364103899515714 2.3641052552245445 … 2.3640994754056486 2.36
-41019485955153]
-
- - -

Notice the massive speed enhancement!

-

Defining Linear Solver Routines and Jacobian-Free Newton-Krylov

-

A completely different way to optimize the linear solvers for large sparse matrices is to use a Krylov subspace method. This requires choosing a linear solver for changing to a Krylov method. Optionally, one can use a Jacobian-free operator to reduce the memory requirements.

-

Declaring a Jacobian-Free Newton-Krylov Implementation

-

To swap the linear solver out, we use the linsolve command and choose the GMRES linear solver.

- - -
-@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
-
- - -
-236.859 s (1266049 allocations: 120.80 MiB)
-
- - - -
-@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
-
- - -
-4.175 s (1327264 allocations: 59.92 MiB)
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
-  0.0
- 11.5
-u: 2-element Array{Array{Float64,3},1}:
- [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
- … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
- 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
-                                                                           
-                                                                           
-                                              
- [2.8494040430340677 2.849376568123844 … 2.849495874352271 2.84944397101885
-77; 2.8494535304517883 2.8494226751421077 … 2.849557062218097 2.84949828863
-504; … ; 2.8493164846505232 2.849294110741412 … 2.8493903195873105 2.849349
-0728548774; 2.849357928360968 2.84933329062441 … 2.8494396335090886 2.84939
-36648688254]
-
-[2.8157264541468283 2.8157283534566693 … 2.8157208829524296 2.8157236606184
-397; 2.8157225956336194 2.815724834275517 … 2.815716958084277 2.81571990149
-71726; … ; 2.815734632998308 2.8157368388547357 … 2.8157282527277308 2.8157
-31663143054; 2.815730494353417 2.815732379564653 … 2.8157247313047327 2.815
-7277764523414]
-
- - -

For more information on linear solver choices, see the linear solver documentation.

-

On this problem, handling the sparsity correctly seemed to give much more of a speedup than going to a Krylov approach, but that can be dependent on the problem (and whether a good preconditioner is found).

-

We can also enhance this by using a Jacobian-Free implementation of f'(x)*v. To define the Jacobian-Free operator, we can use DiffEqOperators.jl to generate an operator JacVecOperator such that Jv*v performs f'(x)*v without building the Jacobian matrix.

- - -
-using DiffEqOperators
-Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0)
-
- - -
-DiffEqOperators.JacVecOperator{Float64,typeof(Main.WeaveSandBox0.brusselato
-r_2d_loop),Array{ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1},3},A
-rray{ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1},3},Array{Float64
-,3},NTuple{4,Float64},Float64,Bool}(Main.WeaveSandBox0.brusselator_2d_loop,
- ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOperators
-.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.12134432813715876,0.
-12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213443281371586,0.1
-213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual{DiffEqOpera
-tors.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1213443281371587
-6,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213443281371586
-,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); … ; Dual{Dif
-fEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.12134432
-813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.12134432
-81371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual
-{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1213
-4432813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213
-443281371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0)]
-
-ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOperators.
-JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … Dual{DiffEqO
-perators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual
-{DiffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{D
-iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) … Dual{D
-iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{Dif
-fEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755); … ; Dual{
-DiffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Di
-ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) … Dual{Di
-ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Diff
-EqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738); Dual{DiffE
-qOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … D
-ual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0
-,0.0)], ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOp
-erators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1213443281371
-5876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213443281371
-586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual{Diff
-EqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.121344328
-13715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.121344328
-1371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); … ; D
-ual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1
-2134432813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1
-213443281371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0
-); Dual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}
-(0.12134432813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}
-(0.1213443281371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0
-,0.0)]
-
-ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOperators.
-JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … Dual{DiffEqO
-perators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual
-{DiffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{D
-iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) … Dual{D
-iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{Dif
-fEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755); … ; Dual{
-DiffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Di
-ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) … Dual{Di
-ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Diff
-EqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738); Dual{DiffE
-qOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … D
-ual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0
-,0.0)], [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.1213443281
-3715876 … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.121344328
-1371586 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0], (3.4, 1.0, 10.0
-, 0.03225806451612903), 0.0, true, false, true)
-
- - -

and then we can use this by making it our jac_prototype:

- - -
-f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv)
-prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p)
-@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
-
- - -
-3.066 s (1875298 allocations: 78.86 MiB)
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
-  0.0
- 11.5
-u: 2-element Array{Array{Float64,3},1}:
- [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
- … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
- 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
-                                                                           
-                                                                           
-                                            
- [2.7872216645408567 2.787194432792592 … 2.78731308303355 2.787261467045361
-5; 2.787271339364228 2.787240720815802 … 2.787374377179153 2.78731605405600
-74; … ; 2.787134161549321 2.7871118187949984 … 2.7872072238860723 2.7871659
-77632712; 2.7871755020101205 2.7871508886342986 … 2.7872566948955084 2.7872
-10735234632]
-
-[2.8988126677437585 2.8988142936416157 … 2.8988075464551772 2.8988105556623
-86; 2.898808902249186 2.8988104514436563 … 2.898803969323616 2.898806883740
-06; … ; 2.898820028584711 2.898821666296394 … 2.898814592161897 2.898817604
-8750383; 2.8988163685403467 2.8988181996160387 … 2.8988111330962316 2.89881
-40808038274]
-
- - -

Adding a Preconditioner

-

The linear solver documentation shows how you can add a preconditioner to the GMRES. For example, you can use packages like AlgebraicMultigrid.jl to add an algebraic multigrid (AMG) or IncompleteLU.jl for an incomplete LU-factorization (iLU).

- - -
-using AlgebraicMultigrid
-pc = aspreconditioner(ruge_stuben(jac_sparsity))
-@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false)
-
- - -
-2.456 s (233048 allocations: 139.27 MiB)
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
-  0.0
- 11.5
-u: 2-element Array{Array{Float64,3},1}:
- [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
- … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
- 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
-                                                                           
-                                                                           
-                                                             
- [3.5273952159283844e10 -1.4265682748702106e10 … 9234.374594756042 13421.86
-8437681665; -7.091075675799031e9 6.51451873695435e9 … 9234.400545337947 134
-21.868410996974; … ; 13421.868025883945 9234.400562276434 … 9234.4001922958
-72 13421.86842409367; 13421.868438369747 9234.37496117112 … 9234.3749749346
-17 13421.868424050659]
-
-[66730.63093229767 -115820.52698935539 … 16462.92400611659 16458.1794290617
-3; 8.043448946694581e6 1.307043107719831e7 … 11331.237739674985 11326.51840
-7046895; … ; 11326.51842066477 11331.237738901911 … 11331.237752373656 1132
-6.518406581376; 16458.179429033426 16462.923993307235 … 16462.923992815315 
-16458.179429539887]
-
- - -

Using Structured Matrix Types

-

If your sparsity pattern follows a specific structure, for example a banded matrix, then you can declare jac_prototype to be of that structure and then additional optimizations will come for free. Note that in this case, it is not necessary to provide a colorvec since the color vector will be analytically derived from the structure of the matrix.

-

The matrices which are allowed are those which satisfy the ArrayInterface.jl interface for automatically-colorable matrices. These include:

- -

Matrices which do not satisfy this interface can still be used, but the matrix coloring will not be automatic, and an appropriate linear solver may need to be given (otherwise it will default to attempting an LU-decomposition).

-

Sundials-Specific Handling

-

While much of the setup makes the transition to using Sundials automatic, there are some differences between the pure Julia implementations and the Sundials implementations which must be taken note of. These are all detailed in the Sundials solver documentation, but here we will highlight the main details which one should make note of.

-

Defining a sparse matrix and a Jacobian for Sundials works just like any other package. The core difference is in the choice of the linear solver. With Sundials, the linear solver choice is done with a Symbol in the linear_solver from a preset list. Particular choices of note are :Band for a banded matrix and :GMRES for using GMRES. If you are using Sundials, :GMRES will not require defining the JacVecOperator, and instead will always make use of a Jacobian-Free Newton Krylov (with numerical differentiation). Thus on this problem we could do:

- - -
-using Sundials
-# Sparse Version
-@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false)
-
- - -
-28.133 s (51388 allocations: 3.20 MiB)
-
- - - -
-# GMRES Version: Doesn't require any extra stuff!
-@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)
-
- - -
-323.286 ms (61058 allocations: 3.63 MiB)
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
-  0.0
- 11.5
-u: 2-element Array{Array{Float64,3},1}:
- [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
- … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
- 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
-
-[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
-196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
-.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
-                                                                           
-                                                                           
-                                                  
- [0.45369441125092624 0.45367162922766396 … 0.45377307354145824 0.453728249
-24331306; 0.45372813444006976 0.45370139820263283 … 0.45382031508907966 0.4
-537681622154197; … ; 0.4536347409999057 0.4536184243336325 … 0.453690734603
-503 0.4536589378647838; 0.4536631791063342 0.4536436405637919 … 0.453729310
-5001047 0.45369169445940305]
-
-[5.023428953606044 5.023425514309876 … 5.02343972583798 5.0234337753788845;
- 5.023442660236476 5.023439873077652 … 5.02345101637559 5.023446317614284; 
-… ; 5.023404093671991 5.023399216246354 … 5.023419229667771 5.0234107290209
-42; 5.023415926060523 5.023411776722086 … 5.02342895844194 5.02342180621704
-3]
-
- - -

Details for setting up a preconditioner with Sundials can be found at the Sundials solver page.

-

Handling Mass Matrices

-

Instead of just defining an ODE as $u' = f(u,p,t)$, it can be common to express the differential equation in the form with a mass matrix:

-

\[ -Mu' = f(u,p,t) -\]

-

where $M$ is known as the mass matrix. Let's solve the Robertson equation. At the top we wrote this equation as:

-

\[
\begin{align}
dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\
dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3 \times 10^7 y_2^2 \\
dy_3 &= 3 \times 10^7 y_2^2 \\
\end{align}
\]

-

But we can instead write this with a conservation relation:

-

\[
\begin{align}
dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\
dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3 \times 10^7 y_2^2 \\
1 &= y_1 + y_2 + y_3 \\
\end{align}
\]

-

In this form, we can write this as a mass matrix ODE where $M$ is singular (this is another form of a differential-algebraic equation (DAE)). Here, the last row of M is just zero. We can implement this form as:

- - -
-using DifferentialEquations
-function rober(du,u,p,t)
-  y₁,y₂,y₃ = u
-  k₁,k₂,k₃ = p
-  du[1] = -k₁*y₁+k₃*y₂*y₃
-  du[2] =  k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
-  du[3] =  y₁ + y₂ + y₃ - 1
-  nothing
-end
-M = [1. 0  0
-     0  1. 0
-     0  0  0]
-f = ODEFunction(rober,mass_matrix=M)
-prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
-sol = solve(prob_mm,Rodas5())
-
-plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))
-
- - - - -

Note that if your mass matrix is singular, i.e. your system is a DAE, then you need to make sure you choose a solver that is compatible with DAEs

- - - -
- - - -
-
-
- - diff --git a/html/exercises/01-workshop_exercises.html b/html/exercises/01-workshop_exercises.html deleted file mode 100644 index cdb72f84..00000000 --- a/html/exercises/01-workshop_exercises.html +++ /dev/null @@ -1,983 +0,0 @@ - - - - - - DifferentialEquations.jl Workshop Exercises - - - - - - - - - - - - - - - - - -
-
-
- -
-

DifferentialEquations.jl Workshop Exercises

-
Chris Rackauckas
- -
- -

These exercises teach common workflows which involve DifferentialEquations.jl. The designation (B) is for "Beginner", meaning that a user new to the package should feel comfortable trying this exercise. An exercise designated (I) is for "Intermediate", meaning the user may want to have some previous background in DifferentialEquations.jl or try some (B) exercises first. The additional (E) designation is for "Experienced", which are portions of exercises which may take some work.

-

The exercises are described as follows:

-
    -
  • Exercise 1 takes the user through solving a stiff ordinary differential equation and using the ModelingToolkit.jl to automatically convert the function to a symbolic form to derive the analytical Jacobian to speed up the solver. The same biological system is then solved with stochasticity, utilizing EnsembleProblems to understand 95% bounds on the solution. Finally, probabilistic programming is employed to perform Bayesian parameter estimation of the parameters against data.

    -
  • -
  • Exercise 2 takes the user through defining a hybrid delay differential equation, that is a differential equation with events, and using differentiable programming techniques (automatic differentiation) to perform gradient-based parameter estimation.

    -
  • -
  • Exercise 3 takes the user through differential-algebraic equation (DAE) modeling, the concept of index, and using both mass-matrix and implicit ODE representations. This will require doing a bit of math, but the student will understand how to change their equations to make their DAE numerically easier for the integrators.

    -
  • -
  • Exercise 4 takes the user through optimizing a PDE solver, utilizing automatic sparsity pattern recognition, automatic conversion of numerical codes to symbolic codes for analytical construction of the Jacobian, preconditioned GMRES, and setting up a solver for IMEX and GPUs, and compute adjoints of PDEs.

    -
  • -
  • Exercise 5 focuses on a chaotic orbit, utilizing parallel ensembles across supercomputers and GPUs to quickly describe phase space.

    -
  • -
  • Exercise 6 takes the user through training a neural stochastic differential equation, using GPU-acceleration and adjoints through Flux.jl's neural network framework to build efficient training codes.

    -
  • -
-

This exercise worksheet is meant to be a living document leading new users through a deep dive of the DifferentialEquations.jl feature set. If you have further suggestions or want to contribute new problems, please open an issue or PR at the DiffEqTutorials.jl repository.

-

Problem 1: Investigating Sources of Randomness and Uncertainty in a Stiff Biological System (B)

-

In this problem we will walk through the basics of simulating models with DifferentialEquations.jl. Let's take the Oregonator model of the Belousov-Zhabotinskii chemical reaction system. This system describes a classical example in non-equilibrium thermodynamics and is a well-known natural chemical oscillator.

-

Part 1: Simulating the Oregonator ODE model

-

When modeling, usually one starts off by investigating the deterministic model. The deterministic ODE formulation of the Oregonator is given by the equations

-

\[ -\begin{align} -\frac{dx}{dt} &= s(y-xy + x - qx^2)\\ -\frac{dy}{dt} &= (-y - xy + z)/s\\ -\frac{dz}{dt} &= w(x - z)\end{align} -\]

-

with parameter values $s=77.27$, $w=0.161$, and $q=8.375 \times 10^{-6}$, and initial conditions $x(0)=1$, $y(0)=2$, and $z(0)=3$. Use the tutorial on solving ODEs to solve this differential equation on the timespan of $t\in[0,360]$ with the default ODE solver. To investigate the result, plot the solution of all components over time, and plot the phase space plot of the solution (hint: use vars=(1,2,3)). What shape is being drawn in phase space?

-

Part 2: Investigating Stiffness

-

Because the reaction rates of q vs s is very large, this model has a "fast" system and a "slow" system. This is typical of ODEs which exhibit a property known as stiffness. Stiffness changes the ODE solvers which can handle the equation well. Take a look at the ODE solver page and investigate solving the equation using methods for non-stiff equations (ex: Tsit5) and stiff equations (ex: Rodas5).

-

Benchmark using $t\in[0,50]$ using @btime from BenchmarkTools.jl. What happens when you increase the timespan?

-

(Optional) Part 3: Specifying Analytical Jacobians (I)

-

Stiff ODE solvers internally utilize the Jacobian of the ODE system in order to improve the stepsizes in the solution. However, computing and factorizing the Jacobian is costly, and thus it can be beneficial to provide the analytical solution.

-

Use the ODEFunction definition page to define an ODEFunction which holds both the OREGO ODE and its Jacobian, and solve using Rodas5.

-

(Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations

-

Deriving Jacobians by hand is tedious. Thankfully symbolic mathematical systems can do the work for you. And thankfully, DifferentialEquations.jl has tools to automatically convert numerical problems into symbolic problems to perform the analysis on!

-

follow the ModelingToolkit.jl README to automatically convert your ODE definition to its symbolic form using modelingtoolkitize and calculate the analytical Jacobian. Use the compilation functions to build the ODEFunction with the embedded analytical solution.

-

Part 5: Adding stochasticity with stochastic differential equations

-

How does this system react in the presence of stochasticity? We can investigate this question by using stochastic differential equations. A stochastic differential equation formulation of this model, known as the multiplicative noise model, is created with:

-

\[ -\begin{align} -dx &= s(y-xy + x - qx^2)dt + \sigma_1 x dW_1\\ -dy &= \frac{-y - xy + z}{s}dt + \sigma_2 y dW_2\\ -dz &= w(x - z)dt + \sigma_3 z dW_3\end{align} -\]

-

with $\sigma_i = 0.1$ where the dW terms describe a Brownian motion, a continuous random process with normally distributed increments. Use the tutorial on solving SDEs to solve simulate this model. Then, use the EnsembleProblem to generate and plot 100 trajectories of the stochastic model, and use EnsembleSummary to plot the mean and 5%-95% region over time.

-

Try solving with the ImplicitRKMil and SOSRI methods. Notice that it isn't stiff every single time!

-

(For fun, see if you can make the Euler-Maruyama EM() method solve this equation. This requires a choice of dt small enough to be stable. This is the "standard" method!)

-

Part 6: Gillespie jump models of discrete stochasticity

-

When biological models have very few particles, continuous models no longer make sense, and instead using the full discrete formulation can be required to accurately describe the dynamics. A discrete differential equation, or Gillespie model, is a continuous-time Markov chain with Poisson-distributed jumps. A discrete description of the Oregonator model is given by a chemical reaction system:

- - - -
-A+Y -> X+P
-X+Y -> 2P
-A+X -> 2X + 2Z
-2X  -> A + P (note: this has rate kX^2!)
-B + Z -> Y
-
- - -

where reactions take place at a rate which is proportional to its components, i.e. the first reaction has a rate k*A*Y for some k. Use the tutorial on Gillespie SSA models to implement the JumpProblem for this model, and use the EnsembleProblem and EnsembleSummary to characterize the stochastic trajectories.

-

For what rate constants does the model give the oscillatory dynamics for the ODE approximation? For information on the true reaction rates, consult the original paper.

-

Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I)

-

In many cases, one comes to understand the proper values for their model's parameters by utilizing data fitting techniques. In this case, we will use the DiffEqBayes.jl library to perform a Bayesian estimation of the parameters. For our data we will use the following potential output:

- - - -
-t = 0.0:1.0:30.0
-data = [1.0 2.05224 2.11422 2.1857 2.26827 2.3641 2.47618 2.60869 2.7677 2.96232 3.20711 3.52709 3.97005 4.64319 5.86202 9.29322 536.068 82388.9 57868.4 1.00399 1.00169 1.00117 1.00094 1.00082 1.00075 1.0007 1.00068 1.00066 1.00065 1.00065 1.00065
-        2.0 1.9494 1.89645 1.84227 1.78727 1.73178 1.67601 1.62008 1.56402 1.50772 1.45094 1.39322 1.33366 1.2705 1.19958 1.10651 0.57194 0.180316 0.431409 251.774 591.754 857.464 1062.78 1219.05 1335.56 1419.88 1478.22 1515.63 1536.25 1543.45 1539.98
-        3.0 2.82065 2.68703 2.58974 2.52405 2.48644 2.47449 2.48686 2.52337 2.58526 2.67563 2.80053 2.9713 3.21051 3.5712 4.23706 12.0266 14868.8 24987.8 23453.4 19202.2 15721.6 12872.0 10538.8 8628.66 7064.73 5784.29 4735.96 3877.66 3174.94 2599.6]
-
- - -

Follow the examples on the parameter estimation page to perform a Bayesian parameter estimation. What are the most likely parameters for the model given the posterior parameter distributions?

-

Use the ODEProblem to perform the fit. If you have time, use the EnsembleProblem of SDEProblems to perform a fit over averages of the SDE solutions. Note that the SDE fit will take significantly more computational resources! See the GPU parallelism section for details on how to accelerate this.

-

(Optional) Part 8: Using DiffEqBiological's Reaction Network DSL

-

DiffEqBiological.jl is a helper library for the DifferentialEquations.jl ecosystem for defining chemical reaction systems at a high level for easy simulation in these various forms. Use the description from the Chemical Reaction Networks documentation page to build a reaction network and generate the ODE/SDE/jump equations, and compare the result to your handcoded versions.

-

Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B)

-

Hybrid differential equations are differential equations with events, where events are some interaction that occurs according to a prespecified condition. For example, the bouncing ball is a classic hybrid differential equation given by an ODE (Newton's Law of Gravity) mixed with the fact that, whenever the ball hits the floor (x=0), then the velocity of the ball flips (v=-v).

-

In addition, many models incorporate delays, that is the driving force of the equation is dependent not on the current values, but values from the past. These delay differential equations model how individuals in the economy act on old information, or that biological processes take time to adapt to a new environment.

-

In this exercise we will build a hybrid delayed pharmacokinetic model and use the parameter estimation techniques to fit it to data.

-

Part 1: Defining an ODE with Predetermined Doses

-

First, let's define the simplest hybrid ordinary differential equation: an ODE where the events take place at fixed times. The ODE we will use is known as the one-compartment model:

-

\[ -\begin{align} -\frac{d[Depot]}{dt} &= -K_a [Depot] + R\\ -\frac{d[Central]}{dt} &= K_a [Depot] - K_e [Central]\end{align} -\]

-

with $t \in [0,90]$, $u_0 = [100.0,0]$, and $p=[K_a,K_e]=[2.268,0.07398]$.

-

With this model, use the event handling documentation page to define a DiscreteCallback which fires at t ∈ [24,48,72] and adds a dose of 100 into [Depot]. (Hint: you'll want to set tstops=[24,48,72] to force the ODE solver to step at these times).

-

Part 2: Adding Delays

-

Now let's assume that instead of there being one compartment, there are many transit compartments that the drug must move through in order to reach the central compartment. This effectively delays the effect of the transition from [Depot] to [Central]. To model this effect, we will use the delay differential equation which utilizes a fixed time delay $\tau$:

-

\[ -\begin{align} -\frac{d[Depot]}{dt} &= -K_a [Depot](t)\\ -\frac{d[Central]}{dt} &= K_a [Depot](t-\tau) - K_e [Central]\end{align} -\]

-

where the parameter $τ = 6.0$. Use the DDE tutorial to define and solve this delayed version of the hybrid model.

-

Part 3: Automatic Differentiation (AD) for Optimization (I)

-

In order to fit parameters $(K_a,K_e,\tau)$ we will want to be able to calculate the gradient of the solution with respect to the initial conditions. One way to do this is via Automatic Differentiation (AD). For small numbers of parameters (<100), it is fastest to use Forward-Mode Automatic Differentiation (even faster than using adjoint sensitivity analysis!). Thus for this problem we will make use of ForwardDiff.jl to use Dual number arithmetic to retrieve both the solution and its derivative w.r.t. parameters in a single solve.

-

Use the information from the page on local sensitivity analysis to define the input dual numbers, solve the equation, and plot both the solution over time and the derivative of the solution w.r.t. the parameters.

-

Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl

-

Now let's fit the delayed model to a dataset. For the data, use the array

- - - -
-t = 0.0:12.0:90.0
-data = [100.0 0.246196 0.000597933 0.24547 0.000596251 0.245275 0.000595453 0.245511
-        0.0 53.7939 16.8784 58.7789 18.3777 59.1879 18.5003 59.2611]
-
- - -

Use the parameter estimation page to define a loss function with build_loss_objective and optimize the parameters against the data. What parameters were used to generate the data?

-

Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I)

-

Now that we have fit our delay differential equation model to the dataset, we want to start testing out automated treatment strategies. Let's assume that instead of giving doses at fixed time points, we invent a wearable which monitors the patient and administers a dose whenever the internal drug concentration falls below 25. To model this effect, we will need to use ContinuousCallbacks to define a callback that triggers when [Central] falls below the threshold value.

-

Use the documentation on the event handling page to define such a callback, and plot the solution over time. How many times does the auto-doser administer a dose? How much does this change as you change the delay time $\tau$?

-

Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods

-

To understand how the parameters affect the solution in a global sense, one wants to use Global Sensitivity Analysis. Use the GSA documentation page to perform global sensitivity analysis and quantify the effect of the various parameters on the solution.

-

Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B)

-

Differential-Algebraic Equation (DAE) systems are like ODEs but allow for adding constraints into the models. This problem will look at solving the double pendulum problem with enforcement of the rigid body constraints, requiring that the total distance L is constant throughout the simulation. While these equations can be rewritten in an ODE form, in many cases it can be simpler to solve the equation directly with the constraints. This tutorial will cover both the idea of index, how to manually perform index reduction, and how to make use of mass matrix and implicit ODE solvers to handle these problems.

-

Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations

-

A mass-matrix ordinary differential equation (ODE) is an ODE where the left-hand side, the derivative side, is multiplied by a matrix known as the mass matrix. This is described as:

-

\[ -Mu' = f(u,p,t) -\]

-

where $M$ is the mass matrix. When $M$ is invertible, there is an ODE which is equivalent to this formulation. When $M$ is not invertible, this can have a distinctly different behavior and is known as a Differential-Algebraic Equation (DAE).

-

Solve the Robertson DAE:

-

\[ -\begin{align} -\frac{dy_1}{dt} &= -0.04y_1 + 10^4 y_2y_3\\ -\frac{dy_2}{dt} &= 0.04y_1 - 10^4 y_2y_3 - 3\times 10^7 y_2^2\\ -1 &= y_1 + y_2 + y_3\end{align} -\]

-

with $y(0) = [1,0,0]$ and $dy(0) = [-0.04,0.04,0.0]$ using the mass-matrix formulation and Rodas5(). Use the ODEProblem page to find out how to declare a mass matrix.

-

(Hint: what if the last row has all zeros?)

-

Part 2: Solving the Implicit Robertson Equations with IDA

-

Use the DAE Tutorial to define a DAE in its implicit form and solve the Robertson equation with IDA. Why is differential_vars = [true,true,false]?

-

Part 3: Manual Index Reduction of the Single Pendulum

-

Part 4: Single Pendulum Solution with IDA

-

Part 5: Solving the Double Pendulum DAE System

-

Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I)

-

This problem will focus on implementing and optimizing the solution of the 2-dimensional Brusselator equations. The BRUSS equations are a well-known highly stiff oscillatory system of partial differential equations which are used in stiff ODE solver benchmarks. In this tutorial we will first walk through a simple implementation, then do allocation-free implementations and look deep into solver options and benchmarking.

-

Part 1: Implementing the BRUSS PDE System as ODEs

-

The Brusselator PDE is defined as follows:

-

\[
\begin{align}
\frac{\partial u}{\partial t} &= 1 + u^2 v - 4.4u + \alpha\left(\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2}\right) + f(x, y, t)\\
\frac{\partial v}{\partial t} &= 3.4u - u^2 v + \alpha\left(\frac{\partial^2 v}{\partial x^2} + \frac{\partial^2 v}{\partial y^2}\right)
\end{align}
\]

-

where

-

\[ -f(x, y, t) = \begin{cases} -5 & \quad \text{if } (x-0.3)^2+(y-0.6)^2 ≤ 0.1^2 \text{ and } t ≥ 1.1 \\ -0 & \quad \text{else}\end{cases} -\]

-

and the initial conditions are

-

\[ -\begin{align} -u(x, y, 0) &= 22\cdot y(1-y)^{3/2} \\ -v(x, y, 0) &= 27\cdot x(1-x)^{3/2}\end{align} -\]

-

with the periodic boundary condition

-

\[ -\begin{align} -u(x+1,y,t) &= u(x,y,t) \\ -u(x,y+1,t) &= u(x,y,t)\end{align} -\]

-

on a timespan of $t \in [0,22]$.

-

To solve this PDE, we will discretize it into a system of ODEs with the finite difference method. We discretize u and v into arrays of the values at each time point: u[i,j] = u(i*dx,j*dy) for some choice of dx/dy, and same for v. Then our ODE is defined with U[i,j,k] = [u v]. The second derivative operator, the Laplacian, discretizes to become the Tridiagonal matrix with [1 -2 1] and a 1 in the top left and right corners. The nonlinear functions are then applied at each point in space (they are broadcast). Use dx=dy=1/32.

-

You will know when you have the correct solution when you plot the solution at x=0.25 and see a periodic orbit.

-

If you are not familiar with this process, see the Gierer-Meinhardt example from the DiffEqTutorials.

-

Note: Start by doing the simplest implementation!

-

Part 2: Optimizing the BRUSS Code

-

PDEs are expensive to solve, and so we will go nowhere without some code optimizing! Follow the steps described in the Gierer-Meinhardt example from the DiffEqTutorials to optimize your Brusselator code. Try other formulations and see what ends up the fastest! Find a trade-off between performance and simplicity that suits your needs.

-

Part 3: Exploiting Jacobian Sparsity with Color Differentiation

-

Use the sparsity! function from SparseDiffTools to generate the sparsity pattern for the Jacobian of this problem. Follow the documentation on the DiffEqFunction page to specify the sparsity pattern of the Jacobian. Generate and add the color vector to speed up the computation of the Jacobian.

-

(Optional) Part 4: Structured Jacobians

-

Specify the sparsity pattern using a BlockBandedMatrix from BlockBandedMatrices.jl to accelerate the previous sparsity handling tricks.

-

(Optional) Part 5: Automatic Symbolicification and Analytical Jacobian

-

Use the modelingtoolkitize function from ModelingToolkit.jl to convert your numerical ODE function into a symbolic ODE function and use that to compute and solve with an analytical sparse Jacobian.

-

Part 6: Utilizing Preconditioned-GMRES Linear Solvers

-

Use the linear solver specification page to solve the equation with TRBDF2 with GMRES. Use the Sundials documentation to solve the equation with CVODE_BDF with Sundials' special internal GMRES. To both of these, use the AlgebraicMultigrid.jl to add a preconditioner to the GMRES solver.

-

Part 7: Exploring IMEX and Exponential Integrator Techniques (E)

-

Instead of using the standard ODEProblem, define a SplitODEProblem to move some of the equation to the "non-stiff part". Try different splits and solve with KenCarp4 to see if the solution can be accelerated.

-

Next, use DiffEqArrayOperator to define part of the equation as linear, and use the ETDRK4 exponential integrator to solve the equation. Note that this technique is not appropriate for this equation since it relies on the nonlinear term being non-stiff for best results.

-

Part 8: Work-Precision Diagrams for Benchmarking Solver Choices

-

Use the WorkPrecisionSet method from DiffEqDevTools.jl to benchmark multiple different solver methods and find out what combination is most efficient. Take a look at DiffEqBenchmarks.jl for usage examples.

-

Part 9: GPU-Parallelism for PDEs (E)

-

Fully vectorize your implementation of the ODE and use a CuArray from CuArrays.jl as the initial condition to cause the whole solution to be GPU accelerated.

-

Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs

-

In order to optimize the parameters of a PDE, you need to be able to compute the gradient of the solution with respect to the parameters. This is done through sensitivity analysis. For PDEs, generally the system is at a scale where forward sensitivity analysis (forward-mode automatic differentiation) is no longer suitable, and for these cases one uses adjoint sensitivity analysis.

-

Rewrite the PDE so the constant terms are parameters, and use the adjoint sensitivity analysis documentation to solve for the solution gradient with a cost function being the L2 distance of the solution from the value 1. Solve with interpolated and checkpointed adjoints. Play with using reverse-mode automatic differentiation vs direct computation of vector-Jacobian products using the autojacvec option of the SensitivityAlg. Find the set of options most suitable for this PDE.

-

If you have compute time, use this adjoint to optimize the parameters of the PDE with respect to this cost function.

-

Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B)

-

In this example we will investigate how the parameters "generally" affect the solution in the chaotic Henon-Heiles system. By "generally" we will use global sensitivity analysis methods to get an average global characterization of the parameters on the solution. In addition to a global sensitivity approach, we will generate large ensembles of solutions with different parameters using a GPU-based parallelism approach.

-

Part 1: Implementing the Henon-Heiles System (B)

-

The Henon-Heiles Hamiltonian system is described by the ODEs:

-

\[ -\begin{align} -\frac{dp_1}{dt} &= -q_1 (1 + 2q_2)\\ -\frac{dp_2}{dt} &= -q_2 - (q_1^2 - q_2^2)\\ -\frac{dq_1}{dt} &= p_1\\ -\frac{dq_2}{dt} &= p_2\end{align} -\]

-

with initial conditions $u_0 = [0.1,0.0,0.0,0.5]$. Solve this system over the timespan $t\in[0,1000]$

-

(Optional) Part 2: Alternative Dynamical Implementations of Henon-Heiles (B)

-

The Henon-Heiles defines a Hamiltonian system with certain structures which can be utilized for a more efficient solution. Use the Dynamical problems page to define a SecondOrderODEProblem corresponding to the acceleration terms:

-

\[ -\begin{align} -\frac{dp_1^2}{dt} &= -q_1 (1 + 2q_2)\\ -\frac{dp_2^2}{dt} &= -q_2 - (q_1^2 - q_2^2)\end{align} -\]

-

Solve this with a method that is specific to dynamical problems, like DPRKN6.

-

The Hamiltonian can also be directly described:

-

\[ -H(p,q) = \frac{1}{2}(p_1^2 + p_2^2) + \frac{1}{2}(q_1^2+q_2^2+2q_1^2 q_2 - \frac{2}{3}q_2^3) -\]

-

Solve this problem using the HamiltonianProblem constructor from DiffEqPhysics.jl.

-

Part 3: Parallelized Ensemble Solving

-

To understand the orbits of the Henon-Heiles system, it can be useful to solve the system with many different initial conditions. Use the ensemble interface to solve with randomized initial conditions in parallel using threads with EnsembleThreads(). Then, use addprocs() to add more cores and solve using EnsembleDistributed(). The former will solve using all of the cores on a single computer, while the latter will use all of the cores on which there are processors, which can include thousands across a supercomputer! See Julia's parallel computing setup page for more details on the setup.

-

Part 4: Parallelized GPU Ensemble Solving

-

Set up the CUDAnative.jl library and use the EnsembleGPUArray() method to parallelize the solution across the thousands of cores of a GPU. Note that this will efficiently solve for hundreds of thousands of trajectories.

-

Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I)

-

In the previous models we had to define a model. Now let's shift the burden of model-proofing onto data by utilizing neural differential equations. A neural differential equation is a differential equation where the model equations are replaced, either in full or in part, by a neural network. For example, a neural ordinary differential equation is an equation $u^\prime = f(u,p,t)$ where $f$ is a neural network. We can learn this neural network from data using various methods, the easiest of which is known as the single shooting method, where one chooses neural network parameters, solves the equation, and checks the ODE's solution against data as a loss.

-

In this example we will define and train various forms of neural differential equations. Note that all of the differential equation types are compatible with neural differential equations, so this is only going to scratch the surface of the possibilities!

-

Part 1: Constructing and Training a Basic Neural ODE

-

Use the DiffEqFlux.jl README to construct a neural ODE to train against the training data:

- - - -
-u0 = Float32[2.; 0.]
-datasize = 30
-tspan = (0.0f0,1.5f0)
-
-function trueODEfunc(du,u,p,t)
-    true_A = [-0.1 2.0; -2.0 -0.1]
-    du .= ((u.^3)'true_A)'
-end
-t = range(tspan[1],tspan[2],length=datasize)
-prob = ODEProblem(trueODEfunc,u0,tspan)
-ode_data = Array(solve(prob,Tsit5(),saveat=t))
-
- - -

Part 2: GPU-accelerating the Neural ODE Process

-

Use the gpu function from Flux.jl to transform all of the calculations onto the GPU and train the neural ODE using GPU-accelerated Tsit5 with adjoints.

-

Part 3: Defining and Training a Mixed Neural ODE

-

Gather data from the Lotka-Volterra equation:

- - - -
-function lotka_volterra(du,u,p,t)
-  x, y = u
-  α, β, δ, γ = p
-  du[1] = dx = α*x - β*x*y
-  du[2] = dy = -δ*y + γ*x*y
-end
-u0 = [1.0,1.0]
-tspan = (0.0,10.0)
-p = [1.5,1.0,3.0,1.0]
-prob = ODEProblem(lotka_volterra,u0,tspan,p)
-sol = Array(solve(prob,Tsit5())(0.0:1.0:10.0))
-
- - -

Now use the mixed neural section of the documentation to define the mixed neural ODE where the functional form of $\frac{dx}{dt}$ is known, and try to derive a neural formulation for $\frac{dy}{dt}$ directly from the data.

-

Part 4: Constructing a Basic Neural SDE

-

Generate data from the Lotka-Volterra equation with multiplicative noise

- - - -
-function lotka_volterra(du,u,p,t)
-  x, y = u
-  α, β, δ, γ = p
-  du[1] = dx = α*x - β*x*y
-  du[2] = dy = -δ*y + γ*x*y
-end
-function lv_noise(du,u,p,t)
-  du[1] = p[5]*u[1]
-  du[2] = p[6]*u[2]
-end
-u0 = [1.0,1.0]
-tspan = (0.0,10.0)
-p = [1.5,1.0,3.0,1.0,0.1,0.1]
-prob = SDEProblem(lotka_volterra,lv_noise,u0,tspan,p)
-sol = [Array(solve(prob,SOSRI())(0.0:1.0:10.0)) for i in 1:20] # 20 solution samples
-
- - -

Train a neural stochastic differential equation $dX = f(X)dt + g(X)dW_t$ where both the drift ($f$) and the diffusion ($g$) functions are neural networks. See if constraining $g$ can make the problem easier to fit.

-

Part 5: Optimizing the training behavior with minibatching (E)

-

Use minibatching on the data to improve the training procedure. An example can be found at this PR.

- - - -
- - - -
-
-
- - diff --git a/html/exercises/02-workshop_solutions.html b/html/exercises/02-workshop_solutions.html deleted file mode 100644 index 1b0ae750..00000000 --- a/html/exercises/02-workshop_solutions.html +++ /dev/null @@ -1,1030 +0,0 @@ - - - - - - DifferentialEquations.jl Workshop Exercise Solutions - - - - - - - - - - - - - - - - - -
-
-
- -
-

DifferentialEquations.jl Workshop Exercise Solutions

-
Chris Rackauckas
- -
- -

Problem 1: Investigating Sources of Randomness and Uncertainty in a Biological System

-

Part 1: Simulating the Oregonator ODE model

- - -
-using DifferentialEquations, Plots
-function orego(du,u,p,t)
-  s,q,w = p
-  y1,y2,y3 = u
-  du[1] = s*(y2+y1*(1-q*y1-y2))
-  du[2] = (y3-(1+y1)*y2)/s
-  du[3] = w*(y1-y3)
-end
-p = [77.27,8.375e-6,0.161]
-prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,360.0),p)
-sol = solve(prob)
-plot(sol)
-
- - - - - -
-plot(sol,vars=(1,2,3))
-
- - - - -

Part 2: Investigating Stiffness

- - -
-using BenchmarkTools
-prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,50.0),p)
-@btime sol = solve(prob,Tsit5())
-
- - -
-3.027 s (8723181 allocations: 920.68 MiB)
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: 872306-element Array{Float64,1}:
-  0.0                 
-  0.016189218375969157
-  0.023553748136851134
-  0.038180267679755686
-  0.050503297498957454
-  0.06810643682329323 
-  0.08676314534460629 
-  0.11145303081419239 
-  0.1410587592990862  
-  0.18104737342146363 
-  ⋮                   
- 49.99977332573522    
- 49.9998045817995     
- 49.999835837885485   
- 49.99986709399317    
- 49.99989835012255    
- 49.99992960627363    
- 49.999960862446414   
- 49.9999921186409     
- 50.0                 
-u: 872306-element Array{Array{Float64,1},1}:
- [1.0, 2.0, 3.0]            
- [1.71286, 1.99961, 2.99591]
- [1.83763, 1.99937, 2.99447]
- [1.94804, 1.99883, 2.99191]
- [1.98078, 1.99836, 2.98988]
- [1.99652, 1.99768, 2.98705]
- [2.00125, 1.99696, 2.98409]
- [2.00327, 1.996, 2.98019]  
- [2.00461, 1.99484, 2.97555]
- [2.0062, 1.99328, 2.96932] 
- ⋮                          
- [1.00114, 1453.02, 414.832]
- [1.00114, 1453.02, 414.83] 
- [1.00114, 1453.02, 414.828]
- [1.00114, 1453.01, 414.826]
- [1.00114, 1453.01, 414.824]
- [1.00114, 1453.01, 414.822]
- [1.00114, 1453.01, 414.82] 
- [1.00114, 1453.01, 414.818]
- [1.00088, 1453.01, 414.817]
-
- - - -
-@btime sol = solve(prob,Rodas5())
-
- - -
-510.971 μs (2920 allocations: 175.86 KiB)
-retcode: Success
-Interpolation: 3rd order Hermite
-t: 110-element Array{Float64,1}:
-  0.0                 
-  0.019615259849088615
-  0.029598267922660175
-  0.047052910887750835
-  0.06489945147114441 
-  0.08933211282883743 
-  0.12069352237075688 
-  0.16655179061086892 
-  0.24088874148540496 
-  0.39558172278217235 
-  ⋮                   
- 26.75710407571649    
- 27.982394888737232   
- 29.7694090380865     
- 32.21886344926688    
- 35.09441917419525    
- 38.498626966839055   
- 42.33882931016379    
- 46.609195570565284   
- 50.0                 
-u: 110-element Array{Array{Float64,1},1}:
- [1.0, 2.0, 3.0]            
- [1.78041, 1.9995, 2.99522] 
- [1.89877, 1.99915, 2.99338]
- [1.97458, 1.9985, 2.99044] 
- [1.995, 1.99781, 2.98756]  
- [2.0016, 1.99686, 2.98368] 
- [2.00375, 1.99564, 2.97874]
- [2.00564, 1.99384, 2.97157]
- [2.00859, 1.99093, 2.9601] 
- [2.01481, 1.98485, 2.93677]
- ⋮                          
- [1.00095, 1052.21, 17454.4]
- [1.00079, 1266.47, 14329.7]
- [1.00067, 1490.32, 10747.2]
- [1.0006, 1670.97, 7245.14] 
- [1.00057, 1758.48, 4560.57]
- [1.00057, 1757.6, 2636.71] 
- [1.00059, 1683.83, 1421.32]
- [1.00064, 1561.03, 715.164]
- [1.00069, 1452.9, 414.722]
-
- - -

(Optional) Part 3: Specifying Analytical Jacobians (I)

-

(Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations

-

Part 5: Adding stochasticity with stochastic differential equations

- - -
-function orego(du,u,p,t)
-  s,q,w = p
-  y1,y2,y3 = u
-  du[1] = s*(y2+y1*(1-q*y1-y2))
-  du[2] = (y3-(1+y1)*y2)/s
-  du[3] = w*(y1-y3)
-end
-function g(du,u,p,t)
-  du[1] = 0.1u[1]
-  du[2] = 0.1u[2]
-  du[3] = 0.1u[3]
-end
-p = [77.27,8.375e-6,0.161]
-prob = SDEProblem(orego,g,[1.0,2.0,3.0],(0.0,30.0),p)
-sol = solve(prob,SOSRI())
-plot(sol)
-
- - - - - -
-sol = solve(prob,ImplicitRKMil()); plot(sol)
-
- - - - -
-sol = solve(prob,ImplicitRKMil()); plot(sol)
-
- - - -

Part 6: Gillespie jump models of discrete stochasticity

-

Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I)

-

The data was generated with:

- - -
-function orego(du,u,p,t)
-  s,q,w = p
-  y1,y2,y3 = u
-  du[1] = s*(y2+y1*(1-q*y1-y2))
-  du[2] = (y3-(1+y1)*y2)/s
-  du[3] = w*(y1-y3)
-end
-p = [60.0,1e-5,0.2]
-prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,30.0),p)
-sol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14)
-
- - -
-retcode: Success
-Interpolation: 3rd order Hermite
-t: 48799-element Array{Float64,1}:
-  0.0                   
-  0.00013773444266123363
-  0.000201070235058078  
-  0.0003020539265117362 
-  0.0004030376179653944 
-  0.000505840575661047  
-  0.0006092634319273482 
-  0.0007136360693805303 
-  0.0008186320050683297 
-  0.000924255465457595  
-  ⋮                     
- 29.79488308974022      
- 29.82256859717493      
- 29.850254104609636     
- 29.877939612044344     
- 29.90562511947905      
- 29.93331062691376      
- 29.960996134348466     
- 29.988681641783174     
- 30.0                   
-u: 48799-element Array{Array{Float64,1},1}:
- [1.0, 2.0, 3.0]            
- [1.00823, 2.0, 2.99995]    
- [1.01199, 2.0, 2.99992]    
- [1.01796, 1.99999, 2.99988]
- [1.02389, 1.99999, 2.99984]
- [1.02989, 1.99999, 2.9998] 
- [1.0359, 1.99999, 2.99976] 
- [1.04191, 1.99999, 2.99972]
- [1.04793, 1.99999, 2.99968]
- [1.05395, 1.99998, 2.99964]
- ⋮                          
- [1.00065, 1541.44, 2708.42]
- [1.00065, 1541.27, 2693.47]
- [1.00065, 1541.08, 2678.6] 
- [1.00065, 1540.89, 2663.82]
- [1.00065, 1540.7, 2649.12] 
- [1.00065, 1540.49, 2634.49]
- [1.00065, 1540.28, 2619.95]
- [1.00065, 1540.07, 2605.49]
- [1.00065, 1539.98, 2599.6]
-
- - -

(Optional) Part 8: Using DiffEqBiological's Reaction Network DSL

-

Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B)

-

Part 1: Defining an ODE with Predetermined Doses

- - -
-function onecompartment(du,u,p,t)
-  Ka,Ke = p
-  du[1] = -Ka*u[1]
-  du[2] =  Ka*u[1] - Ke*u[2]
-end
-p = (Ka=2.268,Ke=0.07398)
-prob = ODEProblem(onecompartment,[100.0,0.0],(0.0,90.0),p)
-
-tstops = [24,48,72]
-condition(u,t,integrator) = t  tstops
-affect!(integrator) = (integrator.u[1] += 100)
-cb = DiscreteCallback(condition,affect!)
-sol = solve(prob,Tsit5(),callback=cb,tstops=tstops)
-plot(sol)
-
- - - - -

Part 2: Adding Delays

- - -
-function onecompartment_delay(du,u,h,p,t)
-  Ka,Ke,τ = p
-  delayed_depot = h(p,t-τ)[1]
-  du[1] = -Ka*u[1]
-  du[2] =  Ka*delayed_depot - Ke*u[2]
-end
-p = (Ka=2.268,Ke=0.07398,τ=6.0)
-h(p,t) = [0.0,0.0]
-prob = DDEProblem(onecompartment_delay,[100.0,0.0],h,(0.0,90.0),p)
-
-tstops = [24,48,72]
-condition(u,t,integrator) = t  tstops
-affect!(integrator) = (integrator.u[1] += 100)
-cb = DiscreteCallback(condition,affect!)
-sol = solve(prob,MethodOfSteps(Rosenbrock23()),callback=cb,tstops=tstops)
-plot(sol)
-
- - - - -

Part 3: Automatic Differentiation (AD) for Optimization (I)

-

Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl

-

The data was generated with

- - -
-p = (Ka = 0.5, Ke = 0.1, τ = 4.0)
-
- - -
-(Ka = 0.5, Ke = 0.1, τ = 4.0)
-
- - -

Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I)

-

Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods

-

Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B)

-

Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations

-

Part 2: Solving the Implicit Robertson Equations with IDA

-

Part 3: Manual Index Reduction of the Single Pendulum

-

Part 4: Single Pendulum Solution with IDA

-

Part 5: Solving the Double Pendulum DAE System

-

Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I)

-

Part 1: Implementing the BRUSS PDE System as ODEs

-

Part 2: Optimizing the BRUSS Code

-

Part 3: Exploiting Jacobian Sparsity with Color Differentiation

-

(Optional) Part 4: Structured Jacobians

-

(Optional) Part 5: Automatic Symbolicification and Analytical Jacobian

-

Part 6: Utilizing Preconditioned-GMRES Linear Solvers

-

Part 7: Exploring IMEX and Exponential Integrator Techniques (E)

-

Part 8: Work-Precision Diagrams for Benchmarking Solver Choices

-

Part 9: GPU-Parallelism for PDEs (E)

-

Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs

-

Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B)

-

Part 1: Implementing the Henon-Heiles System (B)

-

(Optional) Part 2: Alternative Dynamical Implementations of Henon-Heiles (B)

-

Part 3: Parallelized Ensemble Solving

-

Part 4: Parallelized GPU Ensemble Solving

-

Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I)

-

Part 1: Constructing and Training a Basic Neural ODE

-

Part 2: GPU-accelerating the Neural ODE Process

-

Part 3: Defining and Training a Mixed Neural ODE

-

Part 4: Constructing a Basic Neural SDE

-

Part 5: Optimizing the training behavior with minibatching (E)

- - - -
- - - -
-
-
- - diff --git a/html/introduction/01-ode_introduction.html b/html/introduction/01-ode_introduction.html deleted file mode 100644 index ef612dcb..00000000 --- a/html/introduction/01-ode_introduction.html +++ /dev/null @@ -1,1739 +0,0 @@ - - - - - - An Intro to DifferentialEquations.jl - - - - - - - - - - - - - - - - - -
-
-
- -
-

An Intro to DifferentialEquations.jl

-
Chris Rackauckas
- -
- -

Basic Introduction Via Ordinary Differential Equations

-

This notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the ODE tutorial. While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned.

-

Background

-

If you are new to the study of differential equations, it can be helpful to do a quick background read on the definition of ordinary differential equations. We define an ordinary differential equation as an equation which describes the way that a variable $u$ changes, that is

-

\[ -u' = f(u,p,t) -\]

-

where $p$ are the parameters of the model, $t$ is the time variable, and $f$ is the nonlinear model of how $u$ changes. The initial value problem also includes the information about the starting value:

-

\[ -u(t_0) = u_0 -\]

-

Together, if you know the starting value and you know how the value will change with time, then you know what the value will be at any time point in the future. This is the intuitive definition of a differential equation.

-

First Model: Exponential Growth

-

Our first model will be the canonical exponential growth model. This model says that the rate of change is proportional to the current value, and is this:

-

\[ -u' = au -\]

-

where we have a starting value $u(0)=u_0$. Let's say we put 1 dollar into Bitcoin which is increasing at a rate of $98\%$ per year. Then calling now $t=0$ and measuring time in years, our model is:

-

\[ -u' = 0.98u -\]

-

and $u(0) = 1.0$. We encode this into Julia by noticing that, in this setup, we match the general form when

- - -
-f(u,p,t) = 0.98u
-
- - -
-f (generic function with 1 method)
-
- - -

with $ u_0 = 1.0 $. If we want to solve this model on a time span from t=0.0 to t=1.0, then we define an ODEProblem by specifying this function f, this initial condition u0, and this time span as follows:

- - -
-using DifferentialEquations
-f(u,p,t) = 0.98u
-u0 = 1.0
-tspan = (0.0,1.0)
-prob = ODEProblem(f,u0,tspan)
-
- - -
-ODEProblem with uType Float64 and tType Float64. In-place: false
-timespan: (0.0, 1.0)
-u0: 1.0
-
- - -

To solve our ODEProblem we use the command solve.

- - -
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.3521855598485865 
- 0.6934428591625682 
- 1.0                
-u: 5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902209793713
- 1.9730369896422575
- 2.664456142481387
-
- - -

and that's it: we have successfully solved our first ODE!

-

Analyzing the Solution

-

Of course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the Solution Handling page. Here we will describe some of the basics. You can plot the solution using the plot recipe provided by Plots.jl:

- - -
-using Plots; gr()
-plot(sol)
-
- - - - -

From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the Plots.jl attributes. For example:

- - -
-plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line",
-     xaxis="Time (t)",yaxis="u(t) (in μm)",label="My Thick Line!") # legend=false
-
- - - - -

Using the mutating plot! command we can add other pieces to our plot. For this ODE we know that the true solution is $u(t) = u_0 \exp(at)$, so let's add some of the true solution to our plot:

- - -
-plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!")
-
- - - - -

In the previous command I demonstrated sol.t, which grabs the array of time points that the solution was saved at:

- - -
-sol.t
-
- - -
-5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.3521855598485865 
- 0.6934428591625682 
- 1.0
-
- - -

We can get the array of solution values using sol.u:

- - -
-sol.u
-
- - -
-5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902209793713
- 1.9730369896422575
- 2.664456142481387
-
- - -

sol.u[i] is the value of the solution at time sol.t[i]. We can compute arrays of functions of the solution values using standard comprehensions, like:

- - -
-[t+u for (u,t) in tuples(sol)]
-
- - -
-5-element Array{Float64,1}:
- 1.0               
- 1.2038471492789395
- 1.7643757808279579
- 2.666479848804826 
- 3.664456142481387
-
- - -

However, one interesting feature is that, by default, the solution is a continuous function. If we check the print out again:

- - -
-sol
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.3521855598485865 
- 0.6934428591625682 
- 1.0                
-u: 5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902209793713
- 1.9730369896422575
- 2.664456142481387
-
- - -

you see that it says that the solution has a order changing interpolation. The default algorithm automatically switches between methods in order to handle all types of problems. For non-stiff equations (like the one we are solving), it is a continuous function of 4th order accuracy. We can call the solution as a function of time sol(t). For example, to get the value at t=0.45, we can use the command:

- - -
-sol(0.45)
-
- - -
-1.5542610480525971
-
- - -

Controlling the Solver

-

DifferentialEquations.jl has a common set of solver controls among its algorithms which can be found at the Common Solver Options page. We will detail some of the most widely used options.

-

The most useful options are the tolerances abstol and reltol. These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, reltol is the relative accuracy while abstol is the accuracy when u is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. Thus for the defaults abstol=1e-6 and reltol=1e-3, you can expect a global accuracy of about 1-2 digits. If we want to get around 6 digits of accuracy, we can use the commands:

- - -
-sol = solve(prob,abstol=1e-8,reltol=1e-8)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 9-element Array{Float64,1}:
- 0.0                
- 0.04127492324135852
- 0.14679466086219672
- 0.2863090396112191 
- 0.438184089090746  
- 0.6118802875301362 
- 0.7985514876572974 
- 0.9993352795953876 
- 1.0                
-u: 9-element Array{Float64,1}:
- 1.0               
- 1.0412786454705882
- 1.1547210130399164
- 1.32390123501071  
- 1.5363667984773475
- 1.8214678404507973
- 2.187108732054802 
- 2.66272111108696  
- 2.6644562419335163
-
- - -

Now we can see no visible difference against the true solution:

- - -
-plot(sol)
-plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!")
-
- - - - -

Notice that by decreasing the tolerance, the number of steps the solver had to take was 9 instead of the previous 5. There is a trade off between accuracy and speed, and it is up to you to determine what is the right balance for your problem.

-

Another common option is to use saveat to make the solver save at specific time points. For example, if we want the solution at an even grid of t=0.1k for integers k, we would use the command:

- - -
-sol = solve(prob,saveat=0.1)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 11-element Array{Float64,1}:
- 0.0
- 0.1
- 0.2
- 0.3
- 0.4
- 0.5
- 0.6
- 0.7
- 0.8
- 0.9
- 1.0
-u: 11-element Array{Float64,1}:
- 1.0               
- 1.1029627851292922
- 1.2165269512231858
- 1.3417838212289122
- 1.479937951060823 
- 1.63231620704857  
- 1.8003833265032916
- 1.9857565541611835
- 2.1902158127993507
- 2.41572574207719  
- 2.664456142481387
-
- - -

Notice that when saveat is used the continuous output variables are no longer saved and thus sol(t), the interpolation, is only first order. We can save at an uneven grid of points by passing a collection of values to saveat. For example:

- - -
-sol = solve(prob,saveat=[0.2,0.7,0.9])
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 3-element Array{Float64,1}:
- 0.2
- 0.7
- 0.9
-u: 3-element Array{Float64,1}:
- 1.2165269512231858
- 1.9857565541611835
- 2.41572574207719
-
- - -

If we need to reduce the amount of saving, we can also turn off the continuous output directly via dense=false:

- - -
-sol = solve(prob,dense=false)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 5-element Array{Float64,1}:
- 0.0                
- 0.10042494449239292
- 0.3521855598485865 
- 0.6934428591625682 
- 1.0                
-u: 5-element Array{Float64,1}:
- 1.0               
- 1.1034222047865465
- 1.4121902209793713
- 1.9730369896422575
- 2.664456142481387
-
- - -

and to turn off all intermediate saving we can use save_everystep=false:

- - -
-sol = solve(prob,save_everystep=false)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 2-element Array{Float64,1}:
- 0.0
- 1.0
-u: 2-element Array{Float64,1}:
- 1.0              
- 2.664456142481387
-
- - -

If we want to solve and only save the final value, we can even set save_start=false.

- - -
-sol = solve(prob,save_everystep=false,save_start = false)
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 1-element Array{Float64,1}:
- 1.0
-u: 1-element Array{Float64,1}:
- 2.664456142481387
-
- - -

Note that similarly on the other side there is save_end=false.

-

More advanced saving behaviors, such as saving functionals of the solution, are handled via the SavingCallback in the Callback Library which will be addressed later in the tutorial.

-

Choosing Solver Algorithms

-

There is no best algorithm for numerically solving a differential equation. When you call solve(prob), DifferentialEquations.jl makes a guess at a good algorithm for your problem, given the properties that you ask for (the tolerances, the saving information, etc.). However, in many cases you may want more direct control. A later notebook will help introduce the various algorithms in DifferentialEquations.jl, but for now let's introduce the syntax.

-

The most crucial determining factor in choosing a numerical method is the stiffness of the model. Stiffness is roughly characterized by a Jacobian f with large eigenvalues. That's quite mathematical, and we can think of it more intuitively: if you have big numbers in f (like parameters of order 1e5), then it's probably stiff. Or, as the creator of the MATLAB ODE Suite, Lawrence Shampine, likes to define it, if the standard algorithms are slow, then it's stiff. We will go into more depth about diagnosing stiffness in a later tutorial, but for now note that if you believe your model may be stiff, you can hint this to the algorithm chooser via alg_hints = [:stiff].

- - -
-sol = solve(prob,alg_hints=[:stiff])
-
- - -
-retcode: Success
-Interpolation: specialized 3rd order "free" stiffness-aware interpolation
-t: 8-element Array{Float64,1}:
- 0.0                
- 0.05653299582822294
- 0.17270897997721946
- 0.3164619936069947 
- 0.5057530766813646 
- 0.7292290122455201 
- 0.9913056881982787 
- 1.0                
-u: 8-element Array{Float64,1}:
- 1.0               
- 1.0569657840332976
- 1.184421874952142 
- 1.3636060527266576
- 1.6415448917417383
- 2.0434588086024563
- 2.641846814956192 
- 2.664452642975646
-
- - -

Stiff algorithms have to solve implicit equations and linear systems at each step so they should only be used when required.

-

If we want to choose an algorithm directly, you can pass the algorithm type after the problem as solve(prob,alg). For example, let's solve this problem using the Tsit5() algorithm, and just for show let's change the relative tolerance to 1e-6 at the same time:

- - -
-sol = solve(prob,Tsit5(),reltol=1e-6)
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: 10-element Array{Float64,1}:
- 0.0                 
- 0.028970819746309166
- 0.10049166978837214 
- 0.19458902376186224 
- 0.3071721467343173  
- 0.43945340580499864 
- 0.5883428480879211  
- 0.7524861839187198  
- 0.9293007851261506  
- 1.0                 
-u: 10-element Array{Float64,1}:
- 1.0               
- 1.0287982807225062
- 1.103494360777622 
- 1.2100930328474355
- 1.3512481270061714
- 1.5382791211530558
- 1.7799334774107156
- 2.0905693823853637
- 2.486098887385528 
- 2.6644562434913315
-
- - -

Systems of ODEs: The Lorenz Equation

-

Now let's move to a system of ODEs. The Lorenz equation is the famous "butterfly attractor" that spawned chaos theory. It is defined by the system of ODEs:

-

\[ -\begin{align} -\frac{dx}{dt} &= \sigma (y - x)\\ -\frac{dy}{dt} &= x (\rho - z) -y\\ -\frac{dz}{dt} &= xy - \beta z -\end{align} -\]

-

To define a system of differential equations in DifferentialEquations.jl, we define our f as a vector function with a vector initial condition. Thus, for the vector u = [x,y,z]', we have the derivative function:

- - -
-function lorenz!(du,u,p,t)
-    σ,ρ,β = p
-    du[1] = σ*(u[2]-u[1])
-    du[2] = u[1]*(ρ-u[3]) - u[2]
-    du[3] = u[1]*u[2] - β*u[3]
-end
-
- - -
-lorenz! (generic function with 1 method)
-
- - -

Notice here we used the in-place format which writes the output to the preallocated vector du. For systems of equations the in-place format is faster. We use the initial condition $u_0 = [1.0,0.0,0.0]$ as follows:

- - -
-u0 = [1.0,0.0,0.0]
-
- - -
-3-element Array{Float64,1}:
- 1.0
- 0.0
- 0.0
-
- - -

Lastly, for this model we made use of the parameters p. We need to set this value in the ODEProblem as well. For our model we want to solve using the parameters $\sigma = 10$, $\rho = 28$, and $\beta = 8/3$, and thus we build the parameter collection:

- - -
-p = (10,28,8/3) # we could also make this an array, or any other type!
-
- - -
-(10, 28, 2.6666666666666665)
-
- - -

Now we generate the ODEProblem type. In this case, since we have parameters, we add the parameter values to the end of the constructor call. Let's solve this on a time span of t=0 to t=100:

- - -
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz!,u0,tspan,p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100.0)
-u0: [1.0, 0.0, 0.0]
-
- - -

Now, just as before, we solve the problem:

- - -
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 1250-element Array{Float64,1}:
-   0.0                  
-   3.5678604836301404e-5
-   0.0003924646531993154
-   0.0032623883208835647
-   0.00905805935549092  
-   0.016956466266909925 
-   0.027689961278342563 
-   0.041856290192165115 
-   0.06024018681535046  
-   0.0836851555247397   
-   ⋮                    
-  99.50064429327207     
-  99.56497345673458     
-  99.62788705014984     
-  99.6991013016854      
-  99.75654880247485     
-  99.81017638953824     
-  99.87131062092273     
-  99.93558797583201     
- 100.0                  
-u: 1250-element Array{Array{Float64,1},1}:
- [1.0, 0.0, 0.0]                    
- [0.999643, 0.000998805, 1.78143e-8]
- [0.996105, 0.0109654, 2.14696e-6]  
- [0.969359, 0.0897701, 0.0001438]   
- [0.924204, 0.242289, 0.00104616]   
- [0.880046, 0.438736, 0.00342426]   
- [0.848331, 0.691563, 0.00848763]   
- [0.849504, 1.01454, 0.018212]      
- [0.913906, 1.44255, 0.0366935]     
- [1.08886, 2.05232, 0.0740252]      
- ⋮                                  
- [8.87662, 1.1596, 35.1377]         
- [4.55579, -0.800246, 29.5784]      
- [2.06483, -0.641055, 24.865]       
- [0.835596, -0.129419, 20.5289]     
- [0.493369, 0.189755, 17.614]       
- [0.425759, 0.448441, 15.274]       
- [0.520676, 0.802846, 12.9926]      
- [0.797852, 1.39909, 10.9881]       
- [1.34105, 2.47931, 9.37471]
-
- - -

The same solution handling features apply to this case. Thus sol.t stores the time points and sol.u is an array storing the solution at the corresponding time points.

-

However, there are a few extra features which are good to know when dealing with systems of equations. First of all, sol also acts like an array. sol[i] returns the solution at the ith time point.

- - -
-sol.t[10],sol[10]
-
- - -
-(0.0836851555247397, [1.08886, 2.05232, 0.0740252])
-
- - -

Additionally, the solution acts like a matrix where sol[j,i] is the value of the jth variable at time i:

- - -
-sol[2,10]
-
- - -
-2.0523193075036916
-
- - -

We can get a real matrix by performing a conversion:

- - -
-A = Array(sol)
-
- - -
-3×1250 Array{Float64,2}:
- 1.0  0.999643     0.996105    0.969359   …   0.520676   0.797852  1.34105
- 0.0  0.000998805  0.0109654   0.0897701      0.802846   1.39909   2.47931
- 0.0  1.78143e-8   2.14696e-6  0.0001438     12.9926    10.9881    9.37471
-
- - -

This is the same as sol, i.e. sol[i,j] = A[i,j], but now it's a true matrix. Plotting will by default show the time series for each variable:

- - -
-plot(sol)
-
- - - - -

If we instead want to plot values against each other, we can use the vars command. Let's plot variable 1 against variable 2 against variable 3:

- - -
-plot(sol,vars=(1,2,3))
-
- - - - -

This is the classic Lorenz attractor plot, where the x axis is u[1], the y axis is u[2], and the z axis is u[3]. Note that the plot recipe by default uses the interpolation, but we can turn this off:

- - -
-plot(sol,vars=(1,2,3),denseplot=false)
-
- - - - -

Yikes! This shows how calculating the continuous solution has saved a lot of computational effort by computing only a sparse solution and filling in the values! Note that in vars, 0=time, and thus we can plot the time series of a single component like:

- - -
-plot(sol,vars=(0,2))
-
- - - - -

A DSL for Parameterized Functions

-

In many cases you may be defining a lot of functions with parameters. There exists the domain-specific language (DSL) defined by the @ode_def macro for helping with this common problem. For example, we can define the Lotka-Volterra equation:

-

\[ -\begin{align} -\frac{dx}{dt} &= ax - bxy\\ -\frac{dy}{dt} &= -cy + dxy -\end{align} -\]

-

as follows:

- - -
-function lotka_volterra!(du,u,p,t)
-  du[1] = p[1]*u[1] - p[2]*u[1]*u[2]
-  du[2] = -p[3]*u[2] + p[4]*u[1]*u[2]
-end
-
- - -
-lotka_volterra! (generic function with 1 method)
-
- - -

However, that can be hard to follow since there's a lot of "programming" getting in the way. Instead, you can use the @ode_def macro from ParameterizedFunctions.jl:

- - -
-using ParameterizedFunctions
-lv! = @ode_def LotkaVolterra begin
-  dx = a*x - b*x*y
-  dy = -c*y + d*x*y
-end a b c d
-
- - -
-(::Main.WeaveSandBox2.LotkaVolterra{getfield(Main.WeaveSandBox2, Symbol("##
-7#11")),getfield(Main.WeaveSandBox2, Symbol("##8#12")),getfield(Main.WeaveS
-andBox2, Symbol("##9#13")),Nothing,Nothing,getfield(Main.WeaveSandBox2, Sym
-bol("##10#14")),Expr,Expr}) (generic function with 2 methods)
-
- - -

We can then use the result just like an ODE function from before:

- - -
-u0 = [1.0,1.0]
-p = (1.5,1.0,3.0,1.0)
-tspan = (0.0,10.0)
-prob = ODEProblem(lv!,u0,tspan,p)
-sol = solve(prob)
-plot(sol)
-
- - - - -

Not only is the DSL convenient syntax, but it does some magic behind the scenes. For example, further parts of the tutorial will describe how solvers for stiff differential equations have to make use of the Jacobian in calculations. Here, the DSL uses symbolic differentiation to automatically derive that function:

- - -
-lv!.Jex
-
- - -
-quote
-    internal_var___J[1, 1] = internal_var___p[1] - internal_var___p[2] * in
-ternal_var___u[2]
-    internal_var___J[1, 2] = -(internal_var___p[2]) * internal_var___u[1]
-    internal_var___J[2, 1] = internal_var___p[4] * internal_var___u[2]
-    internal_var___J[2, 2] = -(internal_var___p[3]) + internal_var___p[4] *
- internal_var___u[1]
-    nothing
-end
-
- - -

The DSL can derive many other functions; this ability is used to speed up the solvers. An extension to DifferentialEquations.jl, Latexify.jl, allows you to extract these pieces as LaTeX expressions.

-

Internal Types

-

The last basic user-interface feature to explore is the choice of types. DifferentialEquations.jl respects your input types to determine the internal types that are used. Thus since in the previous cases, when we used Float64 values for the initial condition, this meant that the internal values would be solved using Float64. We made sure that time was specified via Float64 values, meaning that time steps would utilize 64-bit floats as well. But, by simply changing these types we can change what is used internally.

-

As a quick example, let's say we want to solve an ODE defined by a matrix. To do this, we can simply use a matrix as input.

- - -
-A  = [1. 0  0 -5
-      4 -2  4 -3
-     -4  0  0  1
-      5 -2  2  3]
-u0 = rand(4,2)
-tspan = (0.0,1.0)
-f(u,p,t) = A*u
-prob = ODEProblem(f,u0,tspan)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 10-element Array{Float64,1}:
- 0.0                
- 0.04330119198184582
- 0.10983948286225614
- 0.18801803393561917
- 0.2866240584151791 
- 0.40906989372159136
- 0.5511592223989411 
- 0.704033661613616  
- 0.8632290632699496 
- 1.0                
-u: 10-element Array{Array{Float64,2},1}:
- [0.425892 0.260488; 0.428294 0.452756; 0.440276 0.234219; 0.972781 0.96522
-3] 
- [0.205955 0.0411449; 0.382764 0.350298; 0.431521 0.252682; 1.1843 1.11992]
-   
- [-0.237451 -0.376748; 0.199172 0.112611; 0.521557 0.376699; 1.47012 1.3198
-]  
- [-0.908936 -0.979168; -0.153697 -0.253996; 0.82237 0.695871; 1.72456 1.479
-84]
- [-1.94502 -1.86916; -0.70898 -0.759724; 1.5587 1.40392; 1.87843 1.53404]  
-   
- [-3.39785 -3.06027; -1.3259 -1.24474; 3.08878 2.78851; 1.73244 1.31272]   
-   
- [-5.00169 -4.28837; -1.45563 -1.20686; 5.69091 5.03387; 0.978203 0.569233]
-   
- [-6.04268 -4.92898; -0.180054 0.118958; 9.1593 7.88829; -0.644615 -0.88556
-8] 
- [-5.50386 -4.13384; 3.45481 3.46671; 12.6522 10.5655; -3.233 -3.09436]    
-   
- [-2.96499 -1.66434; 8.7865 8.1325; 14.443 11.6611; -6.02482 -5.39004]
-
- - -

There is no real difference from what we did before, but now in this case u0 is a 4x2 matrix. Because of that, the solution at each time point is matrix:

- - -
-sol[3]
-
- - -
-4×2 Array{Float64,2}:
- -0.237451  -0.376748
-  0.199172   0.112611
-  0.521557   0.376699
-  1.47012    1.3198
-
- - -

In DifferentialEquations.jl, you can use any type that defines +, -, *, /, and has an appropriate norm. For example, if we want arbitrary precision floating point numbers, we can change the input to be a matrix of BigFloat:

- - -
-big_u0 = big.(u0)
-
- - -
-4×2 Array{BigFloat,2}:
- 0.425892  0.260488
- 0.428294  0.452756
- 0.440276  0.234219
- 0.972781  0.965223
-
- - -

and we can solve the ODEProblem with arbitrary precision numbers by using that initial condition:

- - -
-prob = ODEProblem(f,big_u0,tspan)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 6-element Array{Float64,1}:
- 0.0                
- 0.12950311238875903
- 0.3776286867278208 
- 0.6826307059183888 
- 0.9970081360265544 
- 1.0                
-u: 6-element Array{Array{BigFloat,2},1}:
- [0.425892 0.260488; 0.428294 0.452756; 0.440276 0.234219; 0.972781 0.96522
-3] 
- [-0.391808 -0.517723; 0.122299 0.0273673; 0.575874 0.438266; 1.54336 1.368
-42]
- [-3.01811 -2.75499; -1.19358 -1.1508; 2.62955 2.38001; 1.81053 1.40378]   
-   
- [-5.96797 -4.90523; -0.474788 -0.168012; 8.65576 7.48346; -0.36485 -0.6404
-1] 
- [-3.04563 -1.73948; 8.64814 8.01353; 14.425 11.6568; -5.9603 -5.33791]    
-   
- [-2.96497 -1.66433; 8.78654 8.13253; 14.4431 11.6611; -6.02484 -5.39005]
-
- - - -
-sol[1,3]
-
- - -
--3.018112846173883596414677502392249031603692933761110249554752186715212119
-276106
-
- - -

To really make use of this, we would want to change abstol and reltol to be small! Notice that the type for "time" is different than the type for the dependent variables, and this can be used to optimize the algorithm via keeping multiple precisions. We can convert time to be arbitrary precision as well by defining our time span with BigFloat variables:

- - -
-prob = ODEProblem(f,big_u0,big.(tspan))
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 6-element Array{BigFloat,1}:
- 0.0                                                                       
-       
- 0.129503112388759025420352070500060404254736837969783646324403117273289130
-0107554
- 0.377628686727820828392763533162396453198998194550909011512149758140026290
-6102944
- 0.682630705918388891886570361685113156259420847153670634093012746413852003
-6864237
- 0.997008136026554409331084788629910535308360292831171029276156141723398349
-3437613
- 1.0                                                                       
-       
-u: 6-element Array{Array{BigFloat,2},1}:
- [0.425892 0.260488; 0.428294 0.452756; 0.440276 0.234219; 0.972781 0.96522
-3] 
- [-0.391808 -0.517723; 0.122299 0.0273673; 0.575874 0.438266; 1.54336 1.368
-42]
- [-3.01811 -2.75499; -1.19358 -1.1508; 2.62955 2.38001; 1.81053 1.40378]   
-   
- [-5.96797 -4.90523; -0.474788 -0.168012; 8.65576 7.48346; -0.36485 -0.6404
-1] 
- [-3.04563 -1.73948; 8.64814 8.01353; 14.425 11.6568; -5.9603 -5.33791]    
-   
- [-2.96497 -1.66433; 8.78654 8.13253; 14.4431 11.6611; -6.02484 -5.39005]
-
- - -

Let's end by showing a more complicated use of types. For small arrays, it's usually faster to do operations on static arrays via the package StaticArrays.jl. The syntax is similar to that of normal arrays, but for these special arrays we utilize the @SMatrix macro to indicate we want to create a static array.

- - -
-using StaticArrays
-A  = @SMatrix [ 1.0  0.0 0.0 -5.0
-                4.0 -2.0 4.0 -3.0
-               -4.0  0.0 0.0  1.0
-                5.0 -2.0 2.0  3.0]
-u0 = @SMatrix rand(4,2)
-tspan = (0.0,1.0)
-f(u,p,t) = A*u
-prob = ODEProblem(f,u0,tspan)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 11-element Array{Float64,1}:
- 0.0                
- 0.0336160180207602 
- 0.08412997362621587
- 0.14806631456943903
- 0.22433133614380857
- 0.31990966213948546
- 0.4400739108897834 
- 0.5832156804749542 
- 0.7416649809662037 
- 0.9061477391769605 
- 1.0                
-u: 11-element Array{StaticArrays.SArray{Tuple{4,2},Float64,2,8},1}:
- [0.43626 0.915419; 0.891509 0.380879; 0.0155531 0.332733; 0.516733 0.98606
-2]    
- [0.357329 0.757107; 0.831212 0.394842; -0.0194905 0.257073; 0.580784 1.232
-47]   
- [0.214081 0.43128; 0.709436 0.311625; -0.0461453 0.206495; 0.666519 1.5845
-5]    
- [-0.006741 -0.130363; 0.510945 0.0442428; -0.0280178 0.278947; 0.753493 1.
-98499]
- [-0.320198 -1.00634; 0.230087 -0.465588; 0.0808634 0.613692; 0.819292 2.36
-965]  
- [-0.769708 -2.37667; -0.143448 -1.27988; 0.36721 1.49398; 0.831372 2.65613
-]     
- [-1.37031 -4.38711; -0.543736 -2.2827; 0.975863 3.43225; 0.713097 2.6038] 
-      
- [-2.00833 -6.81153; -0.718739 -2.79939; 2.03013 6.97404; 0.344448 1.76747]
-      
- [-2.36988 -8.71156; -0.235017 -1.44401; 3.44643 12.0958; -0.380711 -0.3405
-71]   
- [-2.00395 -8.5107; 1.30505 3.32618; 4.78979 17.5928; -1.46597 -3.91971]   
-      
- [-1.3054 -6.80442; 2.70972 7.9245; 5.25233 20.0236; -2.19586 -6.51796]
-
- - - -
-sol[3]
-
- - -
-4×2 StaticArrays.SArray{Tuple{4,2},Float64,2,8}:
-  0.214081   0.43128 
-  0.709436   0.311625
- -0.0461453  0.206495
-  0.666519   1.58455
-
- - -

Conclusion

-

These are the basic controls in DifferentialEquations.jl. All equations are defined via a problem type, and the solve command is used with an algorithm choice (or the default) to get a solution. Every solution acts the same, like an array sol[i] with sol.t[i], and also like a continuous function sol(t) with a nice plot command plot(sol). The Common Solver Options can be used to control the solver for any equation type. Lastly, the types used in the numerical solving are determined by the input types, and this can be used to solve with arbitrary precision and add additional optimizations (this can be used to solve via GPUs for example!). While this was shown on ODEs, these techniques generalize to other types of equations as well.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","01-ode_introduction.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/02-choosing_algs.html b/html/introduction/02-choosing_algs.html deleted file mode 100644 index 3eee6169..00000000 --- a/html/introduction/02-choosing_algs.html +++ /dev/null @@ -1,1060 +0,0 @@ - - - - - - Choosing an ODE Algorithm - - - - - - - - - - - - - - - - - -
-
-
- -
-

Choosing an ODE Algorithm

-
Chris Rackauckas
- -
- -

While the default algorithms, along with alg_hints = [:stiff], will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the ODE Solvers page which goes into more depth.

-

Diagnosing Stiffness

-

One of the key things to know for algorithm choices is whether your problem is stiff. Let's take for example the driven Van Der Pol equation:

- - -
-using DifferentialEquations, ParameterizedFunctions
-van! = @ode_def VanDerPol begin
-  dy = μ*((1-x^2)*y - x)
-  dx = 1*y
-end μ
-
-prob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 6.3)
-u0: [0.0, 2.0]
-
- - -

One indicating factor that should alert you to the fact that this model may be stiff is the fact that the parameter is 1e6: large parameters generally mean stiff models. If we try to solve this with the default method:

- - -
-sol = solve(prob,Tsit5())
-
- - -
-retcode: MaxIters
-Interpolation: specialized 4th order "free" interpolation
-t: 999978-element Array{Float64,1}:
- 0.0                  
- 4.997501249375313e-10
- 5.4972513743128435e-9
- 3.28990927256137e-8  
- 9.055577676821075e-8 
- 1.7309485648570045e-7
- 2.793754678038464e-7 
- 4.1495260542675094e-7
- 5.807908778765186e-7 
- 7.812798295243245e-7 
- ⋮                    
- 1.8458616477168546   
- 1.845863136999449    
- 1.8458646262847271   
- 1.8458661155726892   
- 1.8458676048633353   
- 1.8458690941566653   
- 1.8458705834526792   
- 1.845872072751377    
- 1.8458735620527589   
-u: 999978-element Array{Array{Float64,1},1}:
- [0.0, 2.0]          
- [-0.000998751, 2.0] 
- [-0.0109043, 2.0]   
- [-0.0626554, 2.0]   
- [-0.158595, 2.0]    
- [-0.270036, 2.0]    
- [-0.37832, 2.0]     
- [-0.474679, 2.0]    
- [-0.54993, 2.0]     
- [-0.602693, 2.0]    
- ⋮                   
- [-0.777547, 1.83159]
- [-0.777548, 1.83159]
- [-0.777549, 1.83159]
- [-0.77755, 1.83159] 
- [-0.777551, 1.83159]
- [-0.777552, 1.83158]
- [-0.777553, 1.83158]
- [-0.777553, 1.83158]
- [-0.777554, 1.83158]
-
- - -

Here it shows that maximum iterations were reached. Another thing that can happen is that the solution can return that the solver was unstable (exploded to infinity) or that dt became too small. If these happen, the first thing to do is to check that your model is correct. It could very well be that you made an error that causes the model to be unstable!

-

If the model is the problem, then stiffness could be the reason. We can thus hint to the solver to use an appropriate method:

- - -
-sol = solve(prob,alg_hints = [:stiff])
-
- - -
-retcode: Success
-Interpolation: specialized 3rd order "free" stiffness-aware interpolation
-t: 695-element Array{Float64,1}:
- 0.0                  
- 4.997501249375313e-10
- 5.454138614593668e-9 
- 1.8954284827811007e-8
- 4.1496551232327575e-8
- 7.308066628216586e-8 
- 1.1714615060776353e-7
- 1.7481240480546338e-7
- 2.4862277925930763e-7
- 3.4025374895995275e-7
- ⋮                    
- 5.7409760021041745   
- 5.801110722137093    
- 5.8746506588671075   
- 5.955930645265512    
- 6.042472092689859    
- 6.129115709541026    
- 6.215759326392192    
- 6.287868297594483    
- 6.3                  
-u: 695-element Array{Array{Float64,1},1}:
- [0.0, 2.0]          
- [-0.000998751, 2.0] 
- [-0.0108195, 2.0]   
- [-0.0368509, 2.0]   
- [-0.0780351, 2.0]   
- [-0.131248, 2.0]    
- [-0.19755, 2.0]     
- [-0.272074, 2.0]    
- [-0.350452, 2.0]    
- [-0.426453, 2.0]    
- ⋮                   
- [0.703333, -1.93784]
- [0.731566, -1.89471]
- [0.771692, -1.83948]
- [0.825655, -1.77465]
- [0.899292, -1.70015]
- [0.999836, -1.61812]
- [1.14931, -1.5255]  
- [1.35191, -1.43593] 
- [1.39928, -1.41925]
-
- - -

Or we can use the default algorithm. By default, DifferentialEquations.jl uses algorithms like AutoTsit5(Rodas5()) which automatically detect stiffness and switch to an appropriate method once stiffness is known.

- - -
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 1927-element Array{Float64,1}:
- 0.0                  
- 4.997501249375313e-10
- 5.4972513743128435e-9
- 3.28990927256137e-8  
- 9.055577676821075e-8 
- 1.7309485648570045e-7
- 2.793754678038464e-7 
- 4.1495260542675094e-7
- 5.807908778765186e-7 
- 7.812798295243245e-7 
- ⋮                    
- 6.204647119899009    
- 6.219555079521211    
- 6.233840699473001    
- 6.247503397359622    
- 6.260546169082511    
- 6.272975181001707    
- 6.284799378478759    
- 6.296030113796843    
- 6.3                  
-u: 1927-element Array{Array{Float64,1},1}:
- [0.0, 2.0]         
- [-0.000998751, 2.0]
- [-0.0109043, 2.0]  
- [-0.0626554, 2.0]  
- [-0.158595, 2.0]   
- [-0.270036, 2.0]   
- [-0.37832, 2.0]    
- [-0.474679, 2.0]   
- [-0.54993, 2.0]    
- [-0.602693, 2.0]   
- ⋮                  
- [1.11731, -1.54298]
- [1.14817, -1.5261] 
- [1.1805, -1.50946] 
- [1.21435, -1.49311]
- [1.24979, -1.47704]
- [1.28689, -1.46128]
- [1.3257, -1.44583] 
- [1.36632, -1.43072]
- [1.38188, -1.42526]
-
- - -

Another way to understand stiffness is to look at the solution.

- - -
-using Plots; gr()
-sol = solve(prob,alg_hints = [:stiff],reltol=1e-6)
-plot(sol,denseplot=false)
-
- - - - -

Let's zoom in on the y-axis to see what's going on:

- - -
-plot(sol,ylims = (-10.0,10.0))
-
- - - - -

Notice how there are some extreme vertical shifts that occur. These vertical shifts are places where the derivative term is very large, and this is indicative of stiffness. This is an extreme example to highlight the behavior, but this general idea can be carried over to your problem. When in doubt, simply try timing using both a stiff solver and a non-stiff solver and see which is more efficient.

-

To try this out, let's use BenchmarkTools, a package that let's us relatively reliably time code blocks.

- - -
-function lorenz!(du,u,p,t)
-    σ,ρ,β = p
-    du[1] = σ*(u[2]-u[1])
-    du[2] = u[1]*(ρ-u[3]) - u[2]
-    du[3] = u[1]*u[2] - β*u[3]
-end
-u0 = [1.0,0.0,0.0]
-p = (10,28,8/3)
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz!,u0,tspan,p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100.0)
-u0: [1.0, 0.0, 0.0]
-
- - -

And now, let's use the @btime macro from benchmark tools to compare the use of non-stiff and stiff solvers on this problem.

- - -
-using BenchmarkTools
-@btime solve(prob);
-
- - -
-995.395 μs (12678 allocations: 1.37 MiB)
-
- - - -
-@btime solve(prob,alg_hints = [:stiff]);
-
- - -
-10.343 ms (38999 allocations: 2.23 MiB)
-
- - -

In this particular case, we can see that non-stiff solvers get us to the solution much more quickly.

-

The Recommended Methods

-

When picking a method, the general rules are as follows:

-
    -
  • Higher order is more efficient at lower tolerances, lower order is more efficient at higher tolerances

    -
  • -
  • Adaptivity is essential in most real-world scenarios

    -
  • -
  • Runge-Kutta methods do well with non-stiff equations, Rosenbrock methods do well with small stiff equations, BDF methods do well with large stiff equations

    -
  • -
-

While there are always exceptions to the rule, those are good guiding principles. Based on those, a simple way to choose methods is:

-
    -
  • The default is Tsit5(), a non-stiff Runge-Kutta method of Order 5

    -
  • -
  • If you use low tolerances (1e-8), try Vern7() or Vern9()

    -
  • -
  • If you use high tolerances, try BS3()

    -
  • -
  • If the problem is stiff, try Rosenbrock23(), Rodas5(), or CVODE_BDF()

    -
  • -
  • If you don't know, use AutoTsit5(Rosenbrock23()) or AutoVern9(Rodas5()).

    -
  • -
-

(This is a simplified version of the default algorithm chooser)

-

Comparison to other Software

-

If you are familiar with MATLAB, SciPy, or R's DESolve, here's a quick translation start to have transfer your knowledge over.

-
    -
  • ode23 -> BS3()

    -
  • -
  • ode45/dopri5 -> DP5(), though in most cases Tsit5() is more efficient

    -
  • -
  • ode23s -> Rosenbrock23(), though in most cases Rodas4() is more efficient

    -
  • -
  • ode113 -> VCABM(), though in many cases Vern7() is more efficient

    -
  • -
  • dop853 -> DP8(), though in most cases Vern7() is more efficient

    -
  • -
  • ode15s/vode -> QNDF(), though in many cases CVODE_BDF(), Rodas4() or radau() are more efficient

    -
  • -
  • ode23t -> Trapezoid() for efficiency and GenericTrapezoid() for robustness

    -
  • -
  • ode23tb -> TRBDF2

    -
  • -
  • lsoda -> lsoda() (requires ]add LSODA; using LSODA)

    -
  • -
  • ode15i -> IDA(), though in many cases Rodas4() can handle the DAE and is significantly more efficient

    -
  • -
- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","02-choosing_algs.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/03-optimizing_diffeq_code.html b/html/introduction/03-optimizing_diffeq_code.html deleted file mode 100644 index 5f322955..00000000 --- a/html/introduction/03-optimizing_diffeq_code.html +++ /dev/null @@ -1,1784 +0,0 @@ - - - - - - Optimizing DiffEq Code - - - - - - - - - - - - - - - - - -
-
-
- -
-

Optimizing DiffEq Code

-
Chris Rackauckas
- -
- -

In this notebook we will walk through some of the main tools for optimizing your code in order to efficiently solve DifferentialEquations.jl. User-side optimizations are important because, for sufficiently difficult problems, most of the time will be spent inside of your f function, the function you are trying to solve. "Efficient" integrators are those that reduce the required number of f calls to hit the error tolerance. The main ideas for optimizing your DiffEq code, or any Julia function, are the following:

-
    -
  • Make it non-allocating

    -
  • -
  • Use StaticArrays for small arrays

    -
  • -
  • Use broadcast fusion

    -
  • -
  • Make it type-stable

    -
  • -
  • Reduce redundant calculations

    -
  • -
  • Make use of BLAS calls

    -
  • -
  • Optimize algorithm choice

    -
  • -
-

We'll discuss these strategies in the context of small and large systems. Let's start with small systems.

-

Optimizing Small Systems (<100 DEs)

-

Let's take the classic Lorenz system from before. Let's start by naively writing the system in its out-of-place form:

- - -
-function lorenz(u,p,t)
- dx = 10.0*(u[2]-u[1])
- dy = u[1]*(28.0-u[3]) - u[2]
- dz = u[1]*u[2] - (8/3)*u[3]
- [dx,dy,dz]
-end
-
- - -
-lorenz (generic function with 1 method)
-
- - -

Here, lorenz returns an object, [dx,dy,dz], which is created within the body of lorenz.

-

This is a common code pattern from high-level languages like MATLAB, SciPy, or R's deSolve. However, the issue with this form is that it allocates a vector, [dx,dy,dz], at each step. Let's benchmark the solution process with this choice of function:

- - -
-using DifferentialEquations, BenchmarkTools
-u0 = [1.0;0.0;0.0]
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz,u0,tspan)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  10.63 MiB
-  allocs estimate:  99631
-  --------------
-  minimum time:     3.325 ms (0.00% GC)
-  median time:      3.983 ms (0.00% GC)
-  mean time:        8.005 ms (53.49% GC)
-  maximum time:     16.798 ms (58.35% GC)
-  --------------
-  samples:          624
-  evals/sample:     1
-
- - -

The BenchmarkTools package's @benchmark runs the code multiple times to get an accurate measurement. The minimum time is the time it takes when your OS and other background processes aren't getting in the way. Notice that in this case it takes about 5ms to solve and allocates around 11.11 MiB. However, if we were to use this inside of a real user code we'd see a lot of time spent doing garbage collection (GC) to clean up all of the arrays we made. Even if we turn off saving we have these allocations.

- - -
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  9.30 MiB
-  allocs estimate:  87129
-  --------------
-  minimum time:     2.885 ms (0.00% GC)
-  median time:      3.157 ms (0.00% GC)
-  mean time:        6.832 ms (52.67% GC)
-  maximum time:     16.506 ms (58.01% GC)
-  --------------
-  samples:          731
-  evals/sample:     1
-
- - -

The problem of course is that arrays are created every time our derivative function is called. This function is called multiple times per step and is thus the main source of memory usage. To fix this, we can use the in-place form to ***make our code non-allocating***:

- - -
-function lorenz!(du,u,p,t)
- du[1] = 10.0*(u[2]-u[1])
- du[2] = u[1]*(28.0-u[3]) - u[2]
- du[3] = u[1]*u[2] - (8/3)*u[3]
-end
-
- - -
-lorenz! (generic function with 1 method)
-
- - -

Here, instead of creating an array each time, we utilized the cache array du. When the inplace form is used, DifferentialEquations.jl takes a different internal route that minimizes the internal allocations as well. When we benchmark this function, we will see quite a difference.

- - -
-u0 = [1.0;0.0;0.0]
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz!,u0,tspan)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  1.34 MiB
-  allocs estimate:  12593
-  --------------
-  minimum time:     874.414 μs (0.00% GC)
-  median time:      902.574 μs (0.00% GC)
-  mean time:        1.414 ms (34.51% GC)
-  maximum time:     10.179 ms (89.45% GC)
-  --------------
-  samples:          3526
-  evals/sample:     1
-
- - - -
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  6.86 KiB
-  allocs estimate:  91
-  --------------
-  minimum time:     477.740 μs (0.00% GC)
-  median time:      481.344 μs (0.00% GC)
-  mean time:        485.000 μs (0.58% GC)
-  maximum time:     9.914 ms (94.58% GC)
-  --------------
-  samples:          10000
-  evals/sample:     1
-
- - -

There is a 4x time difference just from that change! Notice there are still some allocations and this is due to the construction of the integration cache. But this doesn't scale with the problem size:

- - -
-tspan = (0.0,500.0) # 5x longer than before
-prob = ODEProblem(lorenz!,u0,tspan)
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  6.86 KiB
-  allocs estimate:  91
-  --------------
-  minimum time:     2.466 ms (0.00% GC)
-  median time:      2.481 ms (0.00% GC)
-  mean time:        2.485 ms (0.00% GC)
-  maximum time:     2.909 ms (0.00% GC)
-  --------------
-  samples:          2010
-  evals/sample:     1
-
- - -

since that's all just setup allocations.

-

But if the system is small we can optimize even more.

-

Allocations are only expensive if they are "heap allocations". For a more in-depth definition of heap allocations, there are a lot of sources online. But a good working definition is that heap allocations are variable-sized slabs of memory which have to be pointed to, and this pointer indirection costs time. Additionally, the heap has to be managed and the garbage controllers has to actively keep track of what's on the heap.

-

However, there's an alternative to heap allocations, known as stack allocations. The stack is statically-sized (known at compile time) and thus its accesses are quick. Additionally, the exact block of memory is known in advance by the compiler, and thus re-using the memory is cheap. This means that allocating on the stack has essentially no cost!

-

Arrays have to be heap allocated because their size (and thus the amount of memory they take up) is determined at runtime. But there are structures in Julia which are stack-allocated. structs for example are stack-allocated "value-type"s. Tuples are a stack-allocated collection. The most useful data structure for DiffEq though is the StaticArray from the package StaticArrays.jl. These arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example:

- - -
-using StaticArrays
-A = @SVector [2.0,3.0,5.0]
-
- - -
-3-element StaticArrays.SArray{Tuple{3},Float64,1,3}:
- 2.0
- 3.0
- 5.0
-
- - -

Notice that the 3 after SVector gives the size of the SVector. It cannot be changed. Additionally, SVectors are immutable, so we have to create a new SVector to change values. But remember, we don't have to worry about allocations because this data structure is stack-allocated. SArrays have a lot of extra optimizations as well: they have fast matrix multiplication, fast QR factorizations, etc. which directly make use of the information about the size of the array. Thus, when possible they should be used.

-

Unfortunately static arrays can only be used for sufficiently small arrays. After a certain size, they are forced to heap allocate after some instructions and their compile time balloons. Thus static arrays shouldn't be used if your system has more than 100 variables. Additionally, only the native Julia algorithms can fully utilize static arrays.

-

Let's ***optimize lorenz using static arrays***. Note that in this case, we want to use the out-of-place allocating form, but this time we want to output a static array:

- - -
-function lorenz_static(u,p,t)
- dx = 10.0*(u[2]-u[1])
- dy = u[1]*(28.0-u[3]) - u[2]
- dz = u[1]*u[2] - (8/3)*u[3]
- @SVector [dx,dy,dz]
-end
-
- - -
-lorenz_static (generic function with 1 method)
-
- - -

To make the solver internally use static arrays, we simply give it a static array as the initial condition:

- - -
-u0 = @SVector [1.0,0.0,0.0]
-tspan = (0.0,100.0)
-prob = ODEProblem(lorenz_static,u0,tspan)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  461.59 KiB
-  allocs estimate:  2583
-  --------------
-  minimum time:     498.084 μs (0.00% GC)
-  median time:      504.044 μs (0.00% GC)
-  mean time:        602.322 μs (15.86% GC)
-  maximum time:     5.568 ms (89.09% GC)
-  --------------
-  samples:          8267
-  evals/sample:     1
-
- - - -
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  6.16 KiB
-  allocs estimate:  73
-  --------------
-  minimum time:     399.249 μs (0.00% GC)
-  median time:      404.952 μs (0.00% GC)
-  mean time:        407.993 μs (0.43% GC)
-  maximum time:     9.236 ms (95.17% GC)
-  --------------
-  samples:          10000
-  evals/sample:     1
-
- - -

And that's pretty much all there is to it. With static arrays you don't have to worry about allocating, so use operations like * and don't worry about fusing operations (discussed in the next section). Do "the vectorized code" of R/MATLAB/Python and your code in this case will be fast, or directly use the numbers/values.

-

Exercise 1

-

Implement the out-of-place array, in-place array, and out-of-place static array forms for the Henon-Heiles System and time the results.

-

Optimizing Large Systems

-

Interlude: Managing Allocations with Broadcast Fusion

-

When your system is sufficiently large, or you have to make use of a non-native Julia algorithm, you have to make use of Arrays. In order to use arrays in the most efficient manner, you need to be careful about temporary allocations. Vectorized calculations naturally have plenty of temporary array allocations. This is because a vectorized calculation outputs a vector. Thus:

- - -
-A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000)
-test(A,B,C) = A + B + C
-@benchmark test(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  3
-  --------------
-  minimum time:     3.327 ms (0.00% GC)
-  median time:      3.390 ms (0.00% GC)
-  mean time:        4.734 ms (29.21% GC)
-  maximum time:     7.540 ms (56.46% GC)
-  --------------
-  samples:          1053
-  evals/sample:     1
-
- - -

That expression A + B + C creates 2 arrays. It first creates one for the output of A + B, then uses that result array to + C to get the final result. 2 arrays! We don't want that! The first thing to do to fix this is to use broadcast fusion. Broadcast fusion puts expressions together. For example, instead of doing the + operations separately, if we were to add them all at the same time, then we would only have a single array that's created. For example:

- - -
-test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C)
-@benchmark test2(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  5
-  --------------
-  minimum time:     3.381 ms (0.00% GC)
-  median time:      3.427 ms (0.00% GC)
-  mean time:        4.754 ms (29.19% GC)
-  maximum time:     7.835 ms (58.12% GC)
-  --------------
-  samples:          1050
-  evals/sample:     1
-
- - -

Puts the whole expression into a single function call, and thus only one array is required to store output. This is the same as writing the loop:

- - -
-function test3(A,B,C)
-    D = similar(A)
-    @inbounds for i in eachindex(A)
-        D[i] = A[i] + B[i] + C[i]
-    end
-    D
-end
-@benchmark test3(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  2
-  --------------
-  minimum time:     3.298 ms (0.00% GC)
-  median time:      3.385 ms (0.00% GC)
-  mean time:        4.731 ms (29.32% GC)
-  maximum time:     7.617 ms (56.09% GC)
-  --------------
-  samples:          1054
-  evals/sample:     1
-
- - -

However, Julia's broadcast is syntactic sugar for this. If multiple expressions have a ., then it will put those vectorized operations together. Thus:

- - -
-test4(A,B,C) = A .+ B .+ C
-@benchmark test4(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  2
-  --------------
-  minimum time:     3.298 ms (0.00% GC)
-  median time:      3.381 ms (0.00% GC)
-  mean time:        4.731 ms (29.34% GC)
-  maximum time:     7.653 ms (55.54% GC)
-  --------------
-  samples:          1054
-  evals/sample:     1
-
- - -

is a version with only 1 array created (the output). Note that .s can be used with function calls as well:

- - -
-sin.(A) .+ sin.(B)
-
- - -
-1000×1000 Array{Float64,2}:
- 0.883491  1.46941   1.31592   1.26031   …  0.278758   0.854864  0.546223
- 0.441868  1.3608    1.0348    0.93745      0.963036   0.644012  0.79876 
- 0.6326    1.3906    0.92889   0.95655      1.30921    1.60003   0.603888
- 0.763656  0.89846   0.633754  0.751764     0.78233    0.976681  0.998619
- 0.53243   1.14022   0.974028  0.331864     1.24247    1.31588   0.504788
- 0.801383  0.463875  0.880555  0.717018  …  0.945005   1.13482   0.724712
- 0.700827  1.6268    1.20322   0.770063     0.898224   1.00471   1.3217  
- 1.02611   1.29816   0.898958  0.462772     0.0723639  1.14904   1.07084 
- 1.17631   0.919471  1.498     0.919398     0.77762    0.499798  1.22517 
- 1.15896   1.45735   1.36369   1.47575      1.15369    0.849005  0.949496
- ⋮                                       ⋱                               
- 1.30411   1.19439   0.664451  0.543255     0.719017   0.475454  0.61456 
- 1.27597   0.599419  0.303519  1.01371      1.25969    0.496569  1.33776 
- 1.37308   0.150727  0.878723  1.15994      1.32613    0.707558  0.426822
- 0.638035  0.735053  0.840038  0.814784     0.799952   1.19904   0.51726 
- 1.24534   0.64412   1.27147   1.13847   …  1.47634    0.548757  1.53114 
- 1.07713   0.951169  1.326     1.19763      0.998489   1.10742   0.669014
- 0.603015  1.55926   0.976357  0.847166     1.09955    0.229294  1.08651 
- 1.29188   1.14394   0.55431   0.88283      0.791295   0.914774  0.63556 
- 1.05681   0.870731  0.820364  0.72312      1.39749    0.793006  1.0619
-
- - -

Also, the @. macro applies a dot to every operator:

- - -
-test5(A,B,C) = @. A + B + C #only one array allocated
-@benchmark test5(A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  3
-  --------------
-  minimum time:     3.350 ms (0.00% GC)
-  median time:      3.395 ms (0.00% GC)
-  mean time:        4.749 ms (29.40% GC)
-  maximum time:     7.595 ms (56.66% GC)
-  --------------
-  samples:          1050
-  evals/sample:     1
-
- - -

Using these tools we can get rid of our intermediate array allocations for many vectorized function calls. But we are still allocating the output array. To get rid of that allocation, we can instead use mutation. Mutating broadcast is done via .=. For example, if we pre-allocate the output:

- - -
-D = zeros(1000,1000);
-
- - - -

Then we can keep re-using this cache for subsequent calculations. The mutating broadcasting form is:

- - -
-test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated
-@benchmark test6!(D,A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  0 bytes
-  allocs estimate:  0
-  --------------
-  minimum time:     3.274 ms (0.00% GC)
-  median time:      3.294 ms (0.00% GC)
-  mean time:        3.301 ms (0.00% GC)
-  maximum time:     3.762 ms (0.00% GC)
-  --------------
-  samples:          1509
-  evals/sample:     1
-
- - -

If we use @. before the =, then it will turn it into .=:

- - -
-test7!(D,A,B,C) = @. D = A + B + C #only one array allocated
-@benchmark test7!(D,A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  0 bytes
-  allocs estimate:  0
-  --------------
-  minimum time:     3.278 ms (0.00% GC)
-  median time:      3.299 ms (0.00% GC)
-  mean time:        3.307 ms (0.00% GC)
-  maximum time:     3.622 ms (0.00% GC)
-  --------------
-  samples:          1506
-  evals/sample:     1
-
- - -

Notice that in this case, there is no "output", and instead the values inside of D are what are changed (like with the DiffEq inplace function). Many Julia functions have a mutating form which is denoted with a !. For example, the mutating form of the map is map!:

- - -
-test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C)
-@benchmark test8!(D,A,B,C)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  32 bytes
-  allocs estimate:  1
-  --------------
-  minimum time:     3.286 ms (0.00% GC)
-  median time:      3.387 ms (0.00% GC)
-  mean time:        3.391 ms (0.00% GC)
-  maximum time:     3.626 ms (0.00% GC)
-  --------------
-  samples:          1469
-  evals/sample:     1
-
- - -

Some operations require using an alternate mutating form in order to be fast. For example, matrix multiplication via * allocates a temporary:

- - -
-@benchmark A*B
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  7.63 MiB
-  allocs estimate:  2
-  --------------
-  minimum time:     21.865 ms (0.00% GC)
-  median time:      21.989 ms (0.00% GC)
-  mean time:        23.508 ms (6.28% GC)
-  maximum time:     29.563 ms (15.02% GC)
-  --------------
-  samples:          213
-  evals/sample:     1
-
- - -

Instead, we can use the mutating form mul! into a cache array to avoid allocating the output:

- - -
-using LinearAlgebra
-@benchmark mul!(D,A,B) # same as D = A * B
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  0 bytes
-  allocs estimate:  0
-  --------------
-  minimum time:     21.267 ms (0.00% GC)
-  median time:      21.347 ms (0.00% GC)
-  mean time:        21.362 ms (0.00% GC)
-  maximum time:     23.280 ms (0.00% GC)
-  --------------
-  samples:          234
-  evals/sample:     1
-
- - -

For repeated calculations this reduced allocation can stop GC cycles and thus lead to more efficient code. Additionally, ***we can fuse together higher level linear algebra operations using BLAS***. The package SugarBLAS.jl makes it easy to write higher level operations like alpha*B*A + beta*C as mutating BLAS calls.

-

Example Optimization: Gierer-Meinhardt Reaction-Diffusion PDE Discretization

-

Let's optimize the solution of a Reaction-Diffusion PDE's discretization. In its discretized form, this is the ODE:

-

\[
\begin{align}
du &= D_1 (A_y u + u A_x) + \frac{au^2}{v} + \bar{u} - \alpha u\\
dv &= D_2 (A_y v + v A_x) + a u^2 - \beta v
\end{align}
\]

-

where $u$, $v$, and $A$ are matrices. Here, we will use the simplified version where $A$ is the tridiagonal stencil $[1,-2,1]$, i.e. it's the 2D discretization of the Laplacian. The native code would be something along the lines of:

- - -
-# Generate the constants
-p = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2
-N = 100
-Ax = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1]))
-Ay = copy(Ax)
-Ax[2,1] = 2.0
-Ax[end-1,end] = 2.0
-Ay[1,2] = 2.0
-Ay[end,end-1] = 2.0
-
-function basic_version!(dr,r,p,t)
-  a,α,ubar,β,D1,D2 = p
-  u = r[:,:,1]
-  v = r[:,:,2]
-  Du = D1*(Ay*u + u*Ax)
-  Dv = D2*(Ay*v + v*Ax)
-  dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u
-  dr[:,:,2] = Dv .+ a.*u.*u .- β*v
-end
-
-a,α,ubar,β,D1,D2 = p
-uss = (ubar+β)/α
-vss = (a/β)*uss^2
-r0 = zeros(100,100,2)
-r0[:,:,1] .= uss.+0.1.*rand.()
-r0[:,:,2] .= vss
-
-prob = ODEProblem(basic_version!,r0,(0.0,0.1),p)
-
- - -
-ODEProblem with uType Array{Float64,3} and tType Float64. In-place: true
-timespan: (0.0, 0.1)
-u0: [11.0394 11.0028 … 11.0105 11.0494; 11.0489 11.0042 … 11.0641 11.0566; 
-… ; 11.0191 11.0122 … 11.0218 11.0182; 11.0192 11.0377 … 11.0569 11.0811]
-
-[12.1 12.1 … 12.1 12.1; 12.1 12.1 … 12.1 12.1; … ; 12.1 12.1 … 12.1 12.1; 1
-2.1 12.1 … 12.1 12.1]
-
- - -

In this version we have encoded our initial condition to be a 3-dimensional array, with u[:,:,1] being the A part and u[:,:,2] being the B part.

- - -
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  186.88 MiB
-  allocs estimate:  8589
-  --------------
-  minimum time:     124.746 ms (31.02% GC)
-  median time:      258.186 ms (66.57% GC)
-  mean time:        227.409 ms (62.05% GC)
-  maximum time:     313.404 ms (72.46% GC)
-  --------------
-  samples:          22
-  evals/sample:     1
-
- - -

While this version isn't very efficient,

-

We recommend writing the "high-level" code first, and iteratively optimizing it!

-

The first thing that we can do is get rid of the slicing allocations. The operation r[:,:,1] creates a temporary array instead of a "view", i.e. a pointer to the already existing memory. To make it a view, add @view. Note that we have to be careful with views because they point to the same memory, and thus changing a view changes the original values:

- - -
-A = rand(4)
-@show A
-
- - -
-A = [0.953358, 0.408393, 0.0122052, 0.277688]
-
- - - -
-B = @view A[1:3]
-B[2] = 2
-@show A
-
- - -
-A = [0.953358, 2.0, 0.0122052, 0.277688]
-4-element Array{Float64,1}:
- 0.9533580491706126  
- 2.0                 
- 0.012205168934875665
- 0.2776877187822635
-
- - -

Notice that changing B changed A. This is something to be careful of, but at the same time we want to use this since we want to modify the output dr. Additionally, the last statement is a purely element-wise operation, and thus we can make use of broadcast fusion there. Let's rewrite basic_version! to ***avoid slicing allocations*** and to ***use broadcast fusion***:

- - -
-function gm2!(dr,r,p,t)
-  a,α,ubar,β,D1,D2 = p
-  u = @view r[:,:,1]
-  v = @view r[:,:,2]
-  du = @view dr[:,:,1]
-  dv = @view dr[:,:,2]
-  Du = D1*(Ay*u + u*Ax)
-  Dv = D2*(Ay*v + v*Ax)
-  @. du = Du + a.*u.*u./v + ubar - α*u
-  @. dv = Dv + a.*u.*u - β*v
-end
-prob = ODEProblem(gm2!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  119.55 MiB
-  allocs estimate:  7119
-  --------------
-  minimum time:     98.919 ms (24.81% GC)
-  median time:      168.334 ms (48.75% GC)
-  mean time:        195.261 ms (56.70% GC)
-  maximum time:     299.465 ms (71.34% GC)
-  --------------
-  samples:          26
-  evals/sample:     1
-
- - -

Now, most of the allocations are taking place in Du = D1*(Ay*u + u*Ax) since those operations are vectorized and not mutating. We should instead replace the matrix multiplications with mul!. When doing so, we will need to have cache variables to write into. This looks like:

- - -
-Ayu = zeros(N,N)
-uAx = zeros(N,N)
-Du = zeros(N,N)
-Ayv = zeros(N,N)
-vAx = zeros(N,N)
-Dv = zeros(N,N)
-function gm3!(dr,r,p,t)
-  a,α,ubar,β,D1,D2 = p
-  u = @view r[:,:,1]
-  v = @view r[:,:,2]
-  du = @view dr[:,:,1]
-  dv = @view dr[:,:,2]
-  mul!(Ayu,Ay,u)
-  mul!(uAx,u,Ax)
-  mul!(Ayv,Ay,v)
-  mul!(vAx,v,Ax)
-  @. Du = D1*(Ayu + uAx)
-  @. Dv = D2*(Ayv + vAx)
-  @. du = Du + a*u*u./v + ubar - α*u
-  @. dv = Dv + a*u*u - β*v
-end
-prob = ODEProblem(gm3!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  29.76 MiB
-  allocs estimate:  5355
-  --------------
-  minimum time:     69.427 ms (6.83% GC)
-  median time:      69.752 ms (6.91% GC)
-  mean time:        71.224 ms (8.97% GC)
-  maximum time:     74.430 ms (12.65% GC)
-  --------------
-  samples:          71
-  evals/sample:     1
-
- - -

But our temporary variables are global variables. We need to either declare the caches as const or localize them. We can localize them by adding them to the parameters, p. It's easier for the compiler to reason about local variables than global variables. ***Localizing variables helps to ensure type stability***.

- - -
-p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2
-function gm4!(dr,r,p,t)
-  a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p
-  u = @view r[:,:,1]
-  v = @view r[:,:,2]
-  du = @view dr[:,:,1]
-  dv = @view dr[:,:,2]
-  mul!(Ayu,Ay,u)
-  mul!(uAx,u,Ax)
-  mul!(Ayv,Ay,v)
-  mul!(vAx,v,Ax)
-  @. Du = D1*(Ayu + uAx)
-  @. Dv = D2*(Ayv + vAx)
-  @. du = Du + a*u*u./v + ubar - α*u
-  @. dv = Dv + a*u*u - β*v
-end
-prob = ODEProblem(gm4!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  29.66 MiB
-  allocs estimate:  1090
-  --------------
-  minimum time:     55.101 ms (8.56% GC)
-  median time:      55.416 ms (8.62% GC)
-  mean time:        56.881 ms (11.06% GC)
-  maximum time:     60.062 ms (15.39% GC)
-  --------------
-  samples:          88
-  evals/sample:     1
-
- - -

We could then use the BLAS gemm to optimize the matrix multiplications some more, but instead let's devectorize the stencil.

- - -
-p = (1.0,1.0,1.0,10.0,0.001,100.0,N)
-function fast_gm!(du,u,p,t)
-  a,α,ubar,β,D1,D2,N = p
-
-  @inbounds for j in 2:N-1, i in 2:N-1
-    du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +
-              a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-
-  @inbounds for j in 2:N-1, i in 2:N-1
-    du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +
-            a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-
-  @inbounds for j in 2:N-1
-    i = 1
-    du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +
-            a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for j in 2:N-1
-    i = 1
-    du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +
-            a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-  @inbounds for j in 2:N-1
-    i = N
-    du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +
-           a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for j in 2:N-1
-    i = N
-    du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +
-           a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-
-  @inbounds for i in 2:N-1
-    j = 1
-    du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +
-              a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for i in 2:N-1
-    j = 1
-    du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +
-              a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-  @inbounds for i in 2:N-1
-    j = N
-    du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-  end
-  @inbounds for i in 2:N-1
-    j = N
-    du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-  end
-
-  @inbounds begin
-    i = 1; j = 1
-    du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +
-              a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +
-              a*u[i,j,1]^2 - β*u[i,j,2]
-
-    i = 1; j = N
-    du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-
-    i = N; j = 1
-    du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-
-    i = N; j = N
-    du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +
-             a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]
-    du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +
-             a*u[i,j,1]^2 - β*u[i,j,2]
-   end
-end
-prob = ODEProblem(fast_gm!,r0,(0.0,0.1),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  29.63 MiB
-  allocs estimate:  504
-  --------------
-  minimum time:     17.318 ms (25.55% GC)
-  median time:      17.547 ms (25.42% GC)
-  mean time:        18.928 ms (31.18% GC)
-  maximum time:     22.233 ms (39.53% GC)
-  --------------
-  samples:          265
-  evals/sample:     1
-
- - -

Lastly, we can do other things like multithread the main loops, but these optimizations get the last 2x-3x out. The main optimizations which apply everywhere are the ones we just performed (though the last one only works if your matrix is a stencil. This is known as a matrix-free implementation of the PDE discretization).

-

This gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code!

-

The last thing to do is then ***optimize our algorithm choice***. We have been using Tsit5() as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use CVODE_BDF(). However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. CVODE_BDF allows us to use a sparse Newton-Krylov solver by setting linear_solver = :GMRES (see the solver documentation), and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time.

- - -
-prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p)
-@benchmark solve(prob,Tsit5())
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  2.76 GiB
-  allocs estimate:  41670
-  --------------
-  minimum time:     2.705 s (44.96% GC)
-  median time:      19.769 s (61.02% GC)
-  mean time:        19.769 s (61.02% GC)
-  maximum time:     36.833 s (62.20% GC)
-  --------------
-  samples:          2
-  evals/sample:     1
-
- - - -
-using Sundials
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  117.33 MiB
-  allocs estimate:  30688
-  --------------
-  minimum time:     715.584 ms (3.70% GC)
-  median time:      897.363 ms (23.22% GC)
-  mean time:        875.513 ms (21.20% GC)
-  maximum time:     956.867 ms (27.77% GC)
-  --------------
-  samples:          6
-  evals/sample:     1
-
- - - -
-prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p)
-# Will go out of memory if we don't turn off `save_everystep`!
-@benchmark solve(prob,Tsit5(),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  2.91 MiB
-  allocs estimate:  112
-  --------------
-  minimum time:     9.372 s (0.00% GC)
-  median time:      9.372 s (0.00% GC)
-  mean time:        9.372 s (0.00% GC)
-  maximum time:     9.372 s (0.00% GC)
-  --------------
-  samples:          1
-  evals/sample:     1
-
- - - -
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  323.53 MiB
-  allocs estimate:  84834
-  --------------
-  minimum time:     2.038 s (0.00% GC)
-  median time:      2.232 s (8.59% GC)
-  mean time:        2.260 s (9.74% GC)
-  maximum time:     2.509 s (18.68% GC)
-  --------------
-  samples:          3
-  evals/sample:     1
-
- - -

Now let's check the allocation growth.

- - -
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  4.36 MiB
-  allocs estimate:  75057
-  --------------
-  minimum time:     1.988 s (0.00% GC)
-  median time:      1.989 s (0.00% GC)
-  mean time:        1.989 s (0.00% GC)
-  maximum time:     1.990 s (0.00% GC)
-  --------------
-  samples:          3
-  evals/sample:     1
-
- - - -
-prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p)
-@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)
-
- - -
-BenchmarkTools.Trial: 
-  memory estimate:  5.99 MiB
-  allocs estimate:  110232
-  --------------
-  minimum time:     2.918 s (0.00% GC)
-  median time:      2.920 s (0.00% GC)
-  mean time:        2.920 s (0.00% GC)
-  maximum time:     2.922 s (0.00% GC)
-  --------------
-  samples:          2
-  evals/sample:     1
-
- - -

Notice that we've eliminated almost all allocations, allowing the code to grow without hitting garbage collection and slowing down.

-

Why is CVODE_BDF doing well? What's happening is that, because the problem is stiff, the number of steps required by the explicit Runge-Kutta method grows rapidly, whereas CVODE_BDF is taking large steps. Additionally, the GMRES linear solver form is quite an efficient way to solve the implicit system in this case. This is problem-dependent, and in many cases using a Krylov method effectively requires a preconditioner, so you need to play around with testing other algorithms and linear solvers to find out what works best with your problem.

-

Conclusion

-

Julia gives you the tools to optimize the solver "all the way", but you need to make use of it. The main thing to avoid is temporary allocations. For small systems, this is effectively done via static arrays. For large systems, this is done via in-place operations and cache arrays. Either way, the resulting solution can be immensely sped up over vectorized formulations by using these principles.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","03-optimizing_diffeq_code.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/04-callbacks_and_events.html b/html/introduction/04-callbacks_and_events.html deleted file mode 100644 index f06d0af1..00000000 --- a/html/introduction/04-callbacks_and_events.html +++ /dev/null @@ -1,1350 +0,0 @@ - - - - - - Callbacks and Events - - - - - - - - - - - - - - - - - -
-
-
- -
-

Callbacks and Events

-
Chris Rackauckas
- -
- -

In working with a differential equation, our system will evolve through many states. Particular states of the system may be of interest to us, and we say that an ***"event"*** is triggered when our system reaches these states. For example, events may include the moment when our system reaches a particular temperature or velocity. We ***handle*** these events with ***callbacks***, which tell us what to do once an event has been triggered.

-

These callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserving conservation laws and saving the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers.

-

This tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the Event Handling and Callback Functions page of the documentation. We will also introduce you to some of the most widely used callbacks in the Callback Library, which is a library of pre-built mods.

-

Events and Continuous Callbacks

-

Event handling is done through continuous callbacks. Callbacks take a function, condition, which triggers an affect! when condition == 0. These callbacks are called "continuous" because they will utilize rootfinding on the interpolation to find the "exact" time point at which the condition takes place and apply the affect! at that time point.

-

***Let's use a bouncing ball as a simple system to explain events and callbacks.*** Let's take Newton's model of a ball falling towards the Earth's surface via a gravitational constant g. In this case, the velocity is changing via -g, and position is changing via the velocity. Therefore we receive the system of ODEs:

- - -
-using DifferentialEquations, ParameterizedFunctions
-ball! = @ode_def BallBounce begin
-  dy =  v
-  dv = -g
-end g
-
- - -
-(::Main.WeaveSandBox8.BallBounce{getfield(Main.WeaveSandBox8, Symbol("##1#5
-")),getfield(Main.WeaveSandBox8, Symbol("##2#6")),getfield(Main.WeaveSandBo
-x8, Symbol("##3#7")),Nothing,Nothing,getfield(Main.WeaveSandBox8, Symbol("#
-#4#8")),Expr,Expr}) (generic function with 2 methods)
-
- - -

We want the callback to trigger when y=0 since that's when the ball will hit the Earth's surface (our event). We do this with the condition:

- - -
-function condition(u,t,integrator)
-  u[1]
-end
-
- - -
-condition (generic function with 1 method)
-
- - -

Recall that the condition will trigger when it evaluates to zero, and here it will evaluate to zero when u[1] == 0, which occurs when v == 0. Now we have to say what we want the callback to do. Callbacks make use of the Integrator Interface. Instead of giving a full description, a quick and usable rundown is:

-
    -
  • Values are strored in integrator.u

    -
  • -
  • Times are stored in integrator.t

    -
  • -
  • The parameters are stored in integrator.p

    -
  • -
  • integrator(t) performs an interpolation in the current interval between integrator.tprev and integrator.t (and allows extrapolation)

    -
  • -
  • User-defined options (tolerances, etc.) are stored in integrator.opts

    -
  • -
  • integrator.sol is the current solution object. Note that integrator.sol.prob is the current problem

    -
  • -
-

While there's a lot more on the integrator interface page, that's a working knowledge of what's there.

-

What we want to do with our affect! is to "make the ball bounce". Mathematically speaking, the ball bounces when the sign of the velocity flips. As an added behavior, let's also use a small friction constant to dampen the ball's velocity. This way only a percentage of the velocity will be retained when the event is triggered and the callback is used. We'll define this behavior in the affect! function:

- - -
-function affect!(integrator)
-    integrator.u[2] = -integrator.p[2] * integrator.u[2]
-end
-
- - -
-affect! (generic function with 1 method)
-
- - -

integrator.u[2] is the second value of our model, which is v or velocity, and integrator.p[2], is our friction coefficient.

-

Therefore affect! can be read as follows: affect! will take the current value of velocity, and multiply it -1 multiplied by our friction coefficient. Therefore the ball will change direction and its velocity will dampen when affect! is called.

-

Now let's build the ContinuousCallback:

- - -
-bounce_cb = ContinuousCallback(condition,affect!)
-
- - -
-DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox8.condition),typeof(M
-ain.WeaveSandBox8.affect!),typeof(Main.WeaveSandBox8.affect!),typeof(DiffEq
-Base.INITIALIZE_DEFAULT),Float64,Int64,Nothing}(Main.WeaveSandBox8.conditio
-n, Main.WeaveSandBox8.affect!, Main.WeaveSandBox8.affect!, DiffEqBase.INITI
-ALIZE_DEFAULT, nothing, true, 10, Bool[true, true], 2.220446049250313e-15, 
-0)
-
- - -

Now let's make an ODEProblem which has our callback:

- - -
-u0 = [50.0,0.0]
-tspan = (0.0,15.0)
-p = (9.8,0.9)
-prob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 15.0)
-u0: [50.0, 0.0]
-
- - -

Notice that we chose a friction constant of 0.9. Now we can solve the problem and plot the solution as we normally would:

- - -
-sol = solve(prob,Tsit5())
-using Plots; gr()
-plot(sol)
-
- - - - -

and tada, the ball bounces! Notice that the ContinuousCallback is using the interpolation to apply the effect "exactly" when v == 0. This is crucial for model correctness, and thus when this property is needed a ContinuousCallback should be used.

-

Exercise 1

-

In our example we used a constant coefficient of friction, but if we are bouncing the ball in the same place we may be smoothing the surface (say, squishing the grass), causing there to be less friction after each bounce. In this more advanced model, we want the friction coefficient at the next bounce to be sqrt(friction) from the previous bounce (since friction < 1, sqrt(friction) > friction and sqrt(friction) < 1).

-

Hint: there are many ways to implement this. One way to do it is to make p a Vector and mutate the friction coefficient in the affect!.

-

Discrete Callbacks

-

A discrete callback checks a condition after every integration step and, if true, it will apply an affect!. For example, let's say that at time t=2 we want to include that a kid kicked the ball, adding 20 to the current velocity. This kind of situation, where we want to add a specific behavior which does not require rootfinding, is a good candidate for a DiscreteCallback. In this case, the condition is a boolean for whether to apply the affect!, so:

- - -
-function condition_kick(u,t,integrator)
-    t == 2
-end
-
- - -
-condition_kick (generic function with 1 method)
-
- - -

We want the kick to occur at t=2, so we check for that time point. When we are at this time point, we want to do:

- - -
-function affect_kick!(integrator)
-    integrator.u[2] += 50
-end
-
- - -
-affect_kick! (generic function with 1 method)
-
- - -

Now we build the problem as before:

- - -
-kick_cb = DiscreteCallback(condition_kick,affect_kick!)
-u0 = [50.0,0.0]
-tspan = (0.0,10.0)
-p = (9.8,0.9)
-prob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [50.0, 0.0]
-
- - -

Note that, since we are requiring our effect at exactly the time t=2, we need to tell the integration scheme to step at exactly t=2 to apply this callback. This is done via the option tstops, which is like saveat but means "stop at these values".

- - -
-sol = solve(prob,Tsit5(),tstops=[2.0])
-plot(sol)
-
- - - - -

Note that this example could've been done with a ContinuousCallback by checking the condition t-2.

-

Merging Callbacks with Callback Sets

-

In some cases you may want to merge callbacks to build up more complex behavior. In our previous result, notice that the model is unphysical because the ball goes below zero! What we really need to do is add the bounce callback together with the kick. This can be achieved through the CallbackSet.

- - -
-cb = CallbackSet(bounce_cb,kick_cb)
-
- - -
-DiffEqBase.CallbackSet{Tuple{DiffEqBase.ContinuousCallback{typeof(Main.Weav
-eSandBox8.condition),typeof(Main.WeaveSandBox8.affect!),typeof(Main.WeaveSa
-ndBox8.affect!),typeof(DiffEqBase.INITIALIZE_DEFAULT),Float64,Int64,Nothing
-}},Tuple{DiffEqBase.DiscreteCallback{typeof(Main.WeaveSandBox8.condition_ki
-ck),typeof(Main.WeaveSandBox8.affect_kick!),typeof(DiffEqBase.INITIALIZE_DE
-FAULT)}}}((DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox8.conditio
-n),typeof(Main.WeaveSandBox8.affect!),typeof(Main.WeaveSandBox8.affect!),ty
-peof(DiffEqBase.INITIALIZE_DEFAULT),Float64,Int64,Nothing}(Main.WeaveSandBo
-x8.condition, Main.WeaveSandBox8.affect!, Main.WeaveSandBox8.affect!, DiffE
-qBase.INITIALIZE_DEFAULT, nothing, true, 10, Bool[true, true], 2.2204460492
-50313e-15, 0),), (DiffEqBase.DiscreteCallback{typeof(Main.WeaveSandBox8.con
-dition_kick),typeof(Main.WeaveSandBox8.affect_kick!),typeof(DiffEqBase.INIT
-IALIZE_DEFAULT)}(Main.WeaveSandBox8.condition_kick, Main.WeaveSandBox8.affe
-ct_kick!, DiffEqBase.INITIALIZE_DEFAULT, Bool[true, true]),))
-
- - -

A CallbackSet merges their behavior together. The logic is as follows. In a given interval, if there are multiple continuous callbacks that would trigger, only the one that triggers at the earliest time is used. The time is pulled back to where that continuous callback is triggered, and then the DiscreteCallbacks in the callback set are called in order.

- - -
-u0 = [50.0,0.0]
-tspan = (0.0,15.0)
-p = (9.8,0.9)
-prob = ODEProblem(ball!,u0,tspan,p,callback=cb)
-sol = solve(prob,Tsit5(),tstops=[2.0])
-plot(sol)
-
- - - - -

Notice that we have now merged the behaviors. We can then nest this as deep as we like.

-

Exercise 2

-

Add to the model a linear wind with resistance that changes the acceleration to -g + k*v after t=10. Do so by adding another parameter and allowing it to be zero until a specific time point where a third callback triggers the change.

-

Integration Termination and Directional Handling

-

Let's look at another model now: the model of the Harmonic Oscillator. We can write this as:

- - -
-u0 = [1.,0.]
-harmonic! = @ode_def HarmonicOscillator begin
-   dv = -x
-   dx = v
-end
-tspan = (0.0,10.0)
-prob = ODEProblem(harmonic!,u0,tspan)
-sol = solve(prob)
-plot(sol)
-
- - - - -

Let's instead stop the integration when a condition is met. From the Integrator Interface stepping controls we see that terminate!(integrator) will cause the integration to end. So our new affect! is simply:

- - -
-function terminate_affect!(integrator)
-    terminate!(integrator)
-end
-
- - -
-terminate_affect! (generic function with 1 method)
-
- - -

Let's first stop the integration when the particle moves back to x=0. This means we want to use the condition:

- - -
-function terminate_condition(u,t,integrator)
-    u[2]
-end
-terminate_cb = ContinuousCallback(terminate_condition,terminate_affect!)
-
- - -
-DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox8.terminate_condition
-),typeof(Main.WeaveSandBox8.terminate_affect!),typeof(Main.WeaveSandBox8.te
-rminate_affect!),typeof(DiffEqBase.INITIALIZE_DEFAULT),Float64,Int64,Nothin
-g}(Main.WeaveSandBox8.terminate_condition, Main.WeaveSandBox8.terminate_aff
-ect!, Main.WeaveSandBox8.terminate_affect!, DiffEqBase.INITIALIZE_DEFAULT, 
-nothing, true, 10, Bool[true, true], 2.220446049250313e-15, 0)
-
- - -

Note that instead of adding callbacks to the problem, we can also add them to the solve command. This will automatically form a CallbackSet with any problem-related callbacks and naturally allows you to distinguish between model features and integration controls.

- - -
-sol = solve(prob,callback=terminate_cb)
-plot(sol)
-
- - - - -

Notice that the harmonic oscilator's true solution here is sin and cosine, and thus we would expect this return to zero to happen at t=π:

- - -
-sol.t[end]
-
- - -
-3.1415902498303465
-
- - -

This is one way to approximate π! Lower tolerances and arbitrary precision numbers can make this more exact, but let's not look at that. Instead, what if we wanted to halt the integration after exactly one cycle? To do so we would need to ignore the first zero-crossing. Luckily in these types of scenarios there's usually a structure to the problem that can be exploited. Here, we only want to trigger the affect! when crossing from positive to negative, and not when crossing from negative to positive. In other words, we want our affect! to only occur on upcrossings.

-

If the ContinuousCallback constructor is given a single affect!, it will occur on both upcrossings and downcrossings. If there are two affect!s given, then the first is for upcrossings and the second is for downcrossings. An affect! can be ignored by using nothing. Together, the "upcrossing-only" version of the effect means that the first affect! is what we defined above and the second is nothing. Therefore we want:

- - -
-terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing)
-
- - -
-DiffEqBase.ContinuousCallback{typeof(Main.WeaveSandBox8.terminate_condition
-),typeof(Main.WeaveSandBox8.terminate_affect!),Nothing,typeof(DiffEqBase.IN
-ITIALIZE_DEFAULT),Float64,Int64,Nothing}(Main.WeaveSandBox8.terminate_condi
-tion, Main.WeaveSandBox8.terminate_affect!, nothing, DiffEqBase.INITIALIZE_
-DEFAULT, nothing, true, 10, Bool[true, true], 2.220446049250313e-15, 0)
-
- - -

Which gives us:

- - -
-sol = solve(prob,callback=terminate_upcrossing_cb)
-plot(sol)
-
- - - - -

Callback Library

-

As you can see, callbacks can be very useful and through CallbackSets we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the Callback Library. We will walk through a few examples where these callbacks can come in handy.

-

Manifold Projection

-

One callback is the manifold projection callback. Essentially, you can define any manifold g(sol)=0 which the solution must live on, and cause the integration to project to that manifold after every step. As an example, let's see what happens if we naively run the harmonic oscillator for a long time:

- - -
-tspan = (0.0,10000.0)
-prob = ODEProblem(harmonic!,u0,tspan)
-sol = solve(prob)
-gr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points!
-plot(sol,vars=(1,2))
-
- - - - - -
-plot(sol,vars=(0,1),denseplot=false)
-
- - - - -

Notice that what's going on is that the numerical solution is drifting from the true solution over this long time scale. This is because the integrator is not conserving energy.

- - -
-plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2
-
- - - - -

Some integration techniques like symplectic integrators are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is:

- - -
-function g(resid,u,p,t)
-  resid[1] = u[2]^2 + u[1]^2 - 1
-  resid[2] = 0
-end
-
- - -
-g (generic function with 1 method)
-
- - -

Here the residual measures how far from our desired energy we are, and the number of conditions matches the size of our system (we ignored the second one by making the residual 0). Thus we define a ManifoldProjection callback and add that to the solver:

- - -
-cb = ManifoldProjection(g)
-sol = solve(prob,callback=cb)
-plot(sol,vars=(1,2))
-
- - - - - -
-plot(sol,vars=(0,1),denseplot=false)
-
- - - - -

Now we have "perfect" energy conservation, where if it's ever violated too much the solution will get projected back to energy=1.

- - -
-u1,u2 = sol[500]
-u2^2 + u1^2
-
- - -
-1.0000425845786414
-
- - -

While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the PositiveCallback() which can be used to enforce positivity of the variables.

-

SavingCallback

-

The SavingCallback can be used to allow for special saving behavior. Let's take a linear ODE define on a system of 1000x1000 matrices:

- - -
-prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0))
-
- - -
-ODEProblem with uType Array{Float64,2} and tType Float64. In-place: true
-timespan: (0.0, 1.0)
-u0: [0.620858 0.0652844 … 0.791104 0.126102; 0.941786 0.411355 … 0.0193275 
-0.155585; … ; 0.0920818 0.250822 … 0.966273 0.292458; 0.270047 0.335093 … 0
-.338701 0.826523]
-
- - -

In fields like quantum mechanics you may only want to know specific properties of the solution such as the trace or the norm of the matrix. Saving all of the 1000x1000 matrices can be a costly way to get this information! Instead, we can use the SavingCallback to save the trace and norm at specified times. To do so, we first define our SavedValues cache. Our time is in terms of Float64, and we want to save tuples of Float64s (one for the trace and one for the norm), and thus we generate the cache as:

- - -
-saved_values = SavedValues(Float64, Tuple{Float64,Float64})
-
- - -
-SavedValues{tType=Float64, savevalType=Tuple{Float64,Float64}}
-t:
-Float64[]
-saveval:
-Tuple{Float64,Float64}[]
-
- - -

Now we define the SavingCallback by giving it a function of (u,p,t,integrator) that returns the values to save, and the cache:

- - -
-using LinearAlgebra
-cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values)
-
- - -
-DiffEqBase.DiscreteCallback{getfield(DiffEqCallbacks, Symbol("##28#29")),Di
-ffEqCallbacks.SavingAffect{getfield(Main.WeaveSandBox8, Symbol("##21#22")),
-Float64,Tuple{Float64,Float64},DataStructures.BinaryHeap{Float64,DataStruct
-ures.LessThan},Array{Float64,1}},typeof(DiffEqCallbacks.saving_initialize)}
-(getfield(DiffEqCallbacks, Symbol("##28#29"))(), DiffEqCallbacks.SavingAffe
-ct{getfield(Main.WeaveSandBox8, Symbol("##21#22")),Float64,Tuple{Float64,Fl
-oat64},DataStructures.BinaryHeap{Float64,DataStructures.LessThan},Array{Flo
-at64,1}}(getfield(Main.WeaveSandBox8, Symbol("##21#22"))(), SavedValues{tTy
-pe=Float64, savevalType=Tuple{Float64,Float64}}
-t:
-Float64[]
-saveval:
-Tuple{Float64,Float64}[], DataStructures.BinaryHeap{Float64,DataStructures.
-LessThan}(DataStructures.LessThan(), Float64[]), Float64[], true, true, 0),
- DiffEqCallbacks.saving_initialize, Bool[false, false])
-
- - -

Here we take u and save (tr(u),norm(u)). When we solve with this callback:

- - -
-sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 0-element Array{Float64,1}
-u: 0-element Array{Array{Float64,2},1}
-
- - -

Our values are stored in our saved_values variable:

- - -
-saved_values.t
-
- - -
-5-element Array{Float64,1}:
- 0.0                
- 0.10012880533703399
- 0.3483895172587412 
- 0.6837345412350667 
- 1.0
-
- - - -
-saved_values.saveval
-
- - -
-5-element Array{Tuple{Float64,Float64},1}:
- (521.2188816231161, 577.5708108810427) 
- (576.1101512173013, 638.396686933491)  
- (738.4545731192429, 818.2930849840106) 
- (1032.671678033474, 1144.3196696910056)
- (1416.8197522959554, 1570.000170864033)
-
- - -

By default this happened only at the solver's steps. But the SavingCallback has similar controls as the integrator. For example, if we want to save at every 0.1 seconds, we do can so using saveat:

- - -
-saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache
-cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0)
-sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving
-
- - -
-retcode: Success
-Interpolation: 1st order linear
-t: 0-element Array{Float64,1}
-u: 0-element Array{Array{Float64,2},1}
-
- - - -
-saved_values.t
-
- - -
-11-element Array{Float64,1}:
- 0.0
- 0.1
- 0.2
- 0.3
- 0.4
- 0.5
- 0.6
- 0.7
- 0.8
- 0.9
- 1.0
-
- - - -
-saved_values.saveval
-
- - -
-11-element Array{Tuple{Float64,Float64},1}:
- (521.2188816231161, 577.5708108810427)  
- (576.0359499339363, 638.3144633285659)  
- (636.6182023672519, 705.446606649904)   
- (703.5718556709452, 779.6389991235587)  
- (777.5673418091517, 861.6345569155045)  
- (859.3446126138218, 952.2532322466953)  
- (949.7223545366644, 1052.4022244041187) 
- (1049.6059336885553, 1163.0847837634562)
- (1159.9940460767452, 1285.4075810211646)
- (1281.9911707976896, 1420.5945067726018)
- (1416.8197522959554, 1570.000170864033)
-
- - -

Exercise 3

-

Go back to the Harmonic oscillator. Use the SavingCallback to save an array for the energy over time, and do this both with and without the ManifoldProjection. Plot the results to see the difference the projection makes.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","04-callbacks_and_events.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/introduction/05-formatting_plots.html b/html/introduction/05-formatting_plots.html deleted file mode 100644 index f028059f..00000000 --- a/html/introduction/05-formatting_plots.html +++ /dev/null @@ -1,931 +0,0 @@ - - - - - - Formatting Plots - - - - - - - - - - - - - - - - - -
-
-
- -
-

Formatting Plots

-
Chris Rackauckas
- -
- -

Since the plotting functionality is implemented as a recipe to Plots.jl, all of the options open to Plots.jl can be used in our plots. In addition, there are special features specifically for differential equation plots. This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling solve on the problem, and plot on the solution:

- - -
-using DifferentialEquations, Plots, ParameterizedFunctions
-gr()
-lorenz = @ode_def Lorenz begin
-  dx = σ*(y-x)
-  dy = ρ*x-y-x*z
-  dz = x*y-β*z
-end σ β ρ
-
-p = [10.0,8/3,28]
-u0 = [1., 5., 10.]
-tspan = (0., 100.)
-prob = ODEProblem(lorenz, u0, tspan, p)
-sol = solve(prob)
-
- - -
-retcode: Success
-Interpolation: Automatic order switching interpolation
-t: 1345-element Array{Float64,1}:
-   0.0                
-   0.0354861341350177 
-   0.06066394416099348
-   0.10188862127421744
-   0.14484947449428065
-   0.1983564366367698 
-   0.2504990626839506 
-   0.3056767768177142 
-   0.3545280034970155 
-   0.40770977583939344
-   ⋮                  
-  99.43850921240367   
-  99.50820376840878   
-  99.5966810633544    
-  99.68534828643361   
-  99.77443728645414   
-  99.84980869692284   
-  99.9110153350651    
-  99.96735878458976   
- 100.0                
-u: 1345-element Array{Array{Float64,1},1}:
- [1.0, 5.0, 10.0]              
- [2.31565, 5.89756, 9.40679]   
- [3.23779, 7.04103, 9.23368]   
- [4.99386, 9.83293, 9.62611]   
- [7.42116, 13.9492, 11.5823]   
- [11.4597, 19.7531, 18.1042]   
- [15.4761, 21.5109, 29.8871]   
- [16.4475, 13.1242, 40.9711]   
- [12.8778, 2.61892, 41.2525]   
- [7.13698, -3.09341, 35.5052]  
- ⋮                             
- [-0.565857, -0.921084, 12.316]
- [-0.938645, -1.68821, 10.2883]
- [-1.99467, -3.77821, 8.43264] 
- [-4.49924, -8.6807, 8.21365]  
- [-10.002, -18.2144, 14.267]   
- [-16.1431, -22.0169, 31.2682] 
- [-16.359, -10.5027, 42.8011]  
- [-10.8707, 0.963967, 39.9983] 
- [-7.09417, 3.84177, 35.957]
-
- - - -
-plot(sol)
-
- - - - -

Now let's change it to a phase plot. As discussed in the plot functions page, we can use the vars command to choose the variables to plot. Let's plot variable x vs variable y vs variable z:

- - -
-plot(sol,vars=(1, 2, 3))
-
- - - - -

We can also choose to plot the timeseries for a single variable:

- - -
-plot(sol,vars=[:x])
-
- - - - -

Notice that we were able to use the variable names because we had defined the problem with the macro. But in general, we can use the indices. The previous plots would be:

- - -
-plot(sol,vars=(1,2,3))
-plot(sol,vars=[1])
-
- - - - -

Common options are to add titles, axis, and labels. For example:

- - -
-plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line",
-xaxis="Time (t)",yaxis="u(t) (in mm)",label=["X","Y","Z"])
-
- - - - -

Notice that series recipes apply to the solution type as well. For example, we can use a scatter plot on the timeseries:

- - -
-scatter(sol,vars=[:x])
-
- - - - -

This shows that the recipe is using the interpolation to smooth the plot. It becomes abundantly clear when we turn it off using denseplot=false:

- - -
-plot(sol,vars=(1,2,3),denseplot=false)
-
- - - - -

When this is done, only the values the timestep hits are plotted. Using the interpolation usually results in a much nicer looking plot so it's recommended, and since the interpolations have similar orders to the numerical methods, their results are trustworthy on the full interval. We can control the number of points used in the interpolation's plot using the plotdensity command:

- - -
-plot(sol,vars=(1,2,3),plotdensity=100)
-
- - - - -

That's plotting the entire solution using 100 points spaced evenly in time.

- - -
-plot(sol,vars=(1,2,3),plotdensity=10000)
-
- - - - -

That's more like it! By default it uses 100*length(sol), where the length is the number of internal steps it had to take. This heuristic usually does well, but unusually difficult equations it can be relaxed (since it will take small steps), and for equations with events / discontinuities raising the plot density can help resolve the discontinuity.

-

Lastly notice that we can compose plots. Let's show where the 100 points are using a scatter plot:

- - -
-plot(sol,vars=(1,2,3))
-scatter!(sol,vars=(1,2,3),plotdensity=100)
-
- - - - -

We can instead work with an explicit plot object. This form can be better for building a complex plot in a loop.

- - -
-p = plot(sol,vars=(1,2,3))
-scatter!(p,sol,vars=(1,2,3),plotdensity=100)
-title!("I added a title")
-
- - - - -

You can do all sorts of things. Have fun!

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("introduction","05-formatting_plots.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[159f3aea-2a34-519c-b102-8c37f9878175] Cairo 0.6.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.2
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.16.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/01-classical_physics.html b/html/models/01-classical_physics.html deleted file mode 100644 index 235a171b..00000000 --- a/html/models/01-classical_physics.html +++ /dev/null @@ -1,1169 +0,0 @@ - - - - - - Classical Physics Models - - - - - - - - - - - - - - - - - -
-
-
- -
-

Classical Physics Models

-
Yingbo Ma, Chris Rackauckas
- -
- -

If you're getting some cold feet to jump in to DiffEq land, here are some handcrafted differential equations mini problems to hold your hand along the beginning of your journey.

-

Radioactive Decay of Carbon-14

-

First order linear ODE

-

\[ -f(t,u) = \frac{du}{dt} -\]

-

The Radioactive decay problem is the first order linear ODE problem of an exponential with a negative coefficient, which represents the half-life of the process in question. Should the coefficient be positive, this would represent a population growth equation.

- - -
-using OrdinaryDiffEq, Plots
-gr()
-
-#Half-life of Carbon-14 is 5,730 years.
-C₁ = 5.730
-
-#Setup
-u₀ = 1.0
-tspan = (0.0, 1.0)
-
-#Define the problem
-radioactivedecay(u,p,t) = -C₁*u
-
-#Pass to solver
-prob = ODEProblem(radioactivedecay,u₀,tspan)
-sol = solve(prob,Tsit5())
-
-#Plot
-plot(sol,linewidth=2,title ="Carbon-14 half-life", xaxis = "Time in thousands of years", yaxis = "Percentage left", label = "Numerical Solution")
-plot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label="Analytical Solution")
-
- - - - -

Simple Pendulum

-

Second Order Linear ODE

-

We will start by solving the pendulum problem. In the physics class, we often solve this problem by small angle approximation, i.e. $ sin(\theta) \approx \theta$, because otherwise, we get an elliptic integral which doesn't have an analytic solution. The linearized form is

-

\[ -\ddot{\theta} + \frac{g}{L}{\theta} = 0 -\]

-

But we have numerical ODE solvers! Why not solve the real pendulum?

-

\[ -\ddot{\theta} + \frac{g}{L}{\sin(\theta)} = 0 -\]

- - -
-# Simple Pendulum Problem
-using OrdinaryDiffEq, Plots
-
-#Constants
-const g = 9.81
-L = 1.0
-
-#Initial Conditions
-u₀ = [0,π/2]
-tspan = (0.0,6.3)
-
-#Define the problem
-function simplependulum(du,u,p,t)
-    θ  = u[1]
-     = u[2]
-    du[1] = 
-    du[2] = -(g/L)*sin(θ)
-end
-
-#Pass to solvers
-prob = ODEProblem(simplependulum,u₀, tspan)
-sol = solve(prob,Tsit5())
-
-#Plot
-plot(sol,linewidth=2,title ="Simple Pendulum Problem", xaxis = "Time", yaxis = "Height", label = ["Theta","dTheta"])
-
- - - - -

So now we know that behaviour of the position versus time. However, it will be useful to us to look at the phase space of the pendulum, i.e., and representation of all possible states of the system in question (the pendulum) by looking at its velocity and position. Phase space analysis is ubiquitous in the analysis of dynamical systems, and thus we will provide a few facilities for it.

- - -
-p = plot(sol,vars = (1,2), xlims = (-9,9), title = "Phase Space Plot", xaxis = "Velocity", yaxis = "Position", leg=false)
-function phase_plot(prob, u0, p, tspan=2pi)
-    _prob = ODEProblem(prob.f,u0,(0.0,tspan))
-    sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy
-    plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing)
-end
-for i in -4pi:pi/2:4π
-    for j in -4pi:pi/2:4π
-        phase_plot(prob, [j,i], p)
-    end
-end
-plot(p,xlims = (-9,9))
-
- - - - -

Simple Harmonic Oscillator

-

Double Pendulum

- - -
-#Double Pendulum Problem
-using OrdinaryDiffEq, Plots
-
-#Constants and setup
-const m₁, m₂, L₁, L₂ = 1, 2, 1, 2
-initial = [0, π/3, 0, 3pi/5]
-tspan = (0.,50.)
-
-#Convenience function for transforming from polar to Cartesian coordinates
-function polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4))
-    u = sol.t[1]:dt:sol.t[end]
-
-    p1 = l1*map(x->x[vars[1]], sol.(u))
-    p2 = l2*map(y->y[vars[2]], sol.(u))
-
-    x1 = l1*sin.(p1)
-    y1 = l1*-cos.(p1)
-    (u, (x1 + l2*sin.(p2),
-     y1 - l2*cos.(p2)))
-end
-
-#Define the Problem
-function double_pendulum(xdot,x,p,t)
-    xdot[1]=x[2]
-    xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2)))
-    xdot[3]=x[4]
-    xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2))
-end
-
-#Pass to Solvers
-double_pendulum_problem = ODEProblem(double_pendulum, initial, tspan)
-sol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05);
-
- - - - -
-#Obtain coordinates in Cartesian Geometry
-ts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01)
-plot(ps...)
-
- - - - -

Poincaré section

-

The Poincaré section is a contour plot of a higher-dimensional phase space diagram. It helps to understand the dynamic interactions and is wonderfully pretty.

-

The following equation came from StackOverflow question

-

\[ -\frac{d}{dt} - \begin{pmatrix} - \alpha \\ l_\alpha \\ \beta \\ l_\beta - \end{pmatrix}= - \begin{pmatrix} - 2\frac{l_\alpha - (1+\cos\beta)l_\beta}{3-\cos 2\beta} \\ - -2\sin\alpha - \sin(\alpha + \beta) \\ - 2\frac{-(1+\cos\beta)l_\alpha + (3+2\cos\beta)l_\beta}{3-\cos2\beta}\\ - -\sin(\alpha+\beta) - 2\sin(\beta)\frac{(l_\alpha-l_\beta)l_\beta}{3-\cos2\beta} + 2\sin(2\beta)\frac{l_\alpha^2-2(1+\cos\beta)l_\alpha l_\beta + (3+2\cos\beta)l_\beta^2}{(3-\cos2\beta)^2} - \end{pmatrix} -\]

-

The Poincaré section here is the collection of $(β,l_β)$ when $α=0$ and $\frac{dα}{dt}>0$.

-

Hamiltonian of a double pendulum

-

Now we will plot the Hamiltonian of a double pendulum

- - -
-#Constants and setup
-using OrdinaryDiffEq
-initial2 = [0.01, 0.005, 0.01, 0.01]
-tspan2 = (0.,200.)
-
-#Define the problem
-function double_pendulum_hamiltonian(udot,u,p,t)
-    α  = u[1]
-     = u[2]
-    β  = u[3]
-     = u[4]
-    udot .=
-    [2(-(1+cos(β)))/(3-cos(2β)),
-    -2sin(α) - sin(α+β),
-    2(-(1+cos(β)) + (3+2cos(β)))/(3-cos(2β)),
-    -sin(α+β) - 2sin(β)*(((-))/(3-cos(2β))) + 2sin(2β)*((^2 - 2(1+cos(β))* + (3+2cos(β))^2)/(3-cos(2β))^2)]
-end
-
-# Construct a ContiunousCallback
-condition(u,t,integrator) = u[1]
-affect!(integrator) = nothing
-cb = ContinuousCallback(condition,affect!,nothing,
-                        save_positions = (true,false))
-
-# Construct Problem
-poincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2)
-sol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)
-
-function poincare_map(prob, u₀, p; callback=cb)
-    _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan)
-    sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)
-    scatter!(p, sol, vars=(3,4), markersize = 2)
-end
-
- - -
-poincare_map (generic function with 1 method)
-
- - - -
-p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03))
-for i in -0.01:0.00125:0.01
-    poincare_map(poincare, i, p)
-end
-plot(p,ylims=(-0.01,0.03))
-
- - - - -

Hénon-Heiles System

-

The Hénon-Heiles potential occurs when non-linear motion of a star around a galactic center with the motion restricted to a plane.

-

\[ -\begin{align} -\frac{d^2x}{dt^2}&=-\frac{\partial V}{\partial x}\\ -\frac{d^2y}{dt^2}&=-\frac{\partial V}{\partial y} -\end{align} -\]

-

where

-

\[ -V(x,y)={\frac {1}{2}}(x^{2}+y^{2})+\lambda \left(x^{2}y-{\frac {y^{3}}{3}}\right). -\]

-

We pick $\lambda=1$ in this case, so

-

\[ -V(x,y) = \frac{1}{2}(x^2+y^2+2x^2y-\frac{2}{3}y^3). -\]

-

Then the total energy of the system can be expressed by

-

\[ -E = T+V = V(x,y)+\frac{1}{2}(\dot{x}^2+\dot{y}^2). -\]

-

The total energy should conserve as this system evolves.

- - -
-using OrdinaryDiffEq, Plots
-
-#Setup
-initial = [0.,0.1,0.5,0]
-tspan = (0,100.)
-
-#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will
-#the total energy of the system.
-V(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3)
-E(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2);
-
-#Define the function
-function Hénon_Heiles(du,u,p,t)
-    x  = u[1]
-    y  = u[2]
-    dx = u[3]
-    dy = u[4]
-    du[1] = dx
-    du[2] = dy
-    du[3] = -x - 2x*y
-    du[4] = y^2 - y -x^2
-end
-
-#Pass to solvers
-prob = ODEProblem(Hénon_Heiles, initial, tspan)
-sol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16);
-
- - - - -
-# Plot the orbit
-plot(sol, vars=(1,2), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false)
-
- - - - - -
-#Optional Sanity check - what do you think this returns and why?
-@show sol.retcode
-
- - -
-sol.retcode = :Success
-
- - - -
-#Plot -
-plot(sol, vars=(1,3), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity")
-plot!(sol, vars=(2,4), leg = false)
-
- - - - - -
-#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector
-#pass it to the plotter a bit more conveniently
-energy = map(x->E(x...), sol.u)
-
-#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.
-@show ΔE = energy[1]-energy[end]
-
- - -
-ΔE = energy[1] - energy[end] = -3.092972023296947e-5
-
- - - -
-#Plot
-plot(sol.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy")
-
- - - - -

Symplectic Integration

-

To prevent energy drift, we can instead use a symplectic integrator. We can directly define and solve the SecondOrderODEProblem:

- - -
-function HH_acceleration!(dv,v,u,p,t)
-    x,y  = u
-    dx,dy = dv
-    dv[1] = -x - 2x*y
-    dv[2] = y^2 - y -x^2
-end
-initial_positions = [0.0,0.1]
-initial_velocities = [0.5,0.0]
-prob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan)
-sol2 = solve(prob, KahanLi8(), dt=1/10);
-
- - - -

Notice that we get the same results:

- - -
-# Plot the orbit
-plot(sol2, vars=(3,4), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false)
-
- - - - - -
-plot(sol2, vars=(3,1), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity")
-plot!(sol2, vars=(4,2), leg = false)
-
- - - - -

but now the energy change is essentially zero:

- - -
-energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u)
-#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.
-@show ΔE = energy[1]-energy[end]
-
- - -
-ΔE = energy[1] - energy[end] = 1.0880185641326534e-14
-
- - - -
-#Plot
-plot(sol2.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy")
-
- - - - -

It's so close to zero it breaks GR! And let's try to use a Runge-Kutta-Nyström solver to solve this. Note that Runge-Kutta-Nyström isn't symplectic.

- - -
-sol3 = solve(prob, DPRKN6());
-energy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u)
-@show ΔE = energy[1]-energy[end]
-
- - -
-ΔE = energy[1] - energy[end] = -8.017994408304752e-6
-
- - - -
-gr()
-plot(sol3.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy")
-
- - - - -

Note that we are using the DPRKN6 sovler at reltol=1e-3 (the default), yet it has a smaller energy variation than Vern9 at abs_tol=1e-16, rel_tol=1e-16. Therefore, using specialized solvers to solve its particular problem is very efficient.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","01-classical_physics.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/02-conditional_dosing.html b/html/models/02-conditional_dosing.html deleted file mode 100644 index 27864a59..00000000 --- a/html/models/02-conditional_dosing.html +++ /dev/null @@ -1,876 +0,0 @@ - - - - - - Conditional Dosing Pharmacometric Example - - - - - - - - - - - - - - - - - -
-
-
- -
-

Conditional Dosing Pharmacometric Example

-
Chris Rackauckas
- -
- -

In this example we will show how to model a conditional dosing using the DiscreteCallbacks. The problem is as follows. The patient has a drug A(t) in their system. The concentration of the drug is given as C(t)=A(t)/V for some volume constant V. At t=4, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below 4, then they will receive a new dose.

-

For our model, we will use the simple decay equation. We will write this in the in-place form to make it easy to extend to more complicated examples:

- - -
-using DifferentialEquations
-function f(du,u,p,t)
-    du[1] = -u[1]
-end
-u0 = [10.0]
-const V = 1
-prob = ODEProblem(f,u0,(0.0,10.0))
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [10.0]
-
- - -

Let's see what the solution looks like without any events.

- - -
-sol = solve(prob,Tsit5())
-using Plots; gr()
-plot(sol)
-
- - - - -

We see that at time t=4, the patient should receive a dose. Let's code up that event. We need to check at t=4 if the concentration u[1]/4 is <4, and if so, add 10 to u[1]. We do this with the following:

- - -
-condition(u,t,integrator) = t==4 && u[1]/V<4
-affect!(integrator) = integrator.u[1] += 10
-cb = DiscreteCallback(condition,affect!)
-
- - -
-DiffEqBase.DiscreteCallback{typeof(Main.WeaveSandBox18.condition),typeof(Ma
-in.WeaveSandBox18.affect!),typeof(DiffEqBase.INITIALIZE_DEFAULT)}(Main.Weav
-eSandBox18.condition, Main.WeaveSandBox18.affect!, DiffEqBase.INITIALIZE_DE
-FAULT, Bool[true, true])
-
- - -

Now we will give this callback to the solver, and tell it to stop at t=4 so that way the condition can be checked:

- - -
-sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)
-using Plots; gr()
-plot(sol)
-
- - - - -

Let's show that it actually added 10 instead of setting the value to 10. We could have set the value using affect!(integrator) = integrator.u[1] = 10

- - -
-println(sol(4.00000))
-
- - -
-[0.183164]
-
- - - -
-println(sol(4.000000000001))
-
- - -
-[10.1832]
-
- - -

Now let's model a patient whose decay rate for the drug is lower:

- - -
-function f(du,u,p,t)
-    du[1] = -u[1]/6
-end
-u0 = [10.0]
-const V = 1
-prob = ODEProblem(f,u0,(0.0,10.0))
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [10.0]
-
- - - -
-sol = solve(prob,Tsit5())
-using Plots; gr()
-plot(sol)
-
- - - - -

Under the same criteria, with the same event, this patient will not receive a second dose:

- - -
-sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)
-using Plots; gr()
-plot(sol)
-
- - - - - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","02-conditional_dosing.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/03-diffeqbio_I_introduction.html b/html/models/03-diffeqbio_I_introduction.html deleted file mode 100644 index ef931196..00000000 --- a/html/models/03-diffeqbio_I_introduction.html +++ /dev/null @@ -1,1023 +0,0 @@ - - - - - - DiffEqBiological Tutorial I: Introduction - - - - - - - - - - - - - - - - - -
-
-
- -
-

DiffEqBiological Tutorial I: Introduction

-
Samuel Isaacson
- -
- -

DiffEqBiological.jl is a domain specific language (DSL) for writing chemical reaction networks in Julia. The generated chemical reaction network model can then be translated into a variety of mathematical models which can be solved using components of the broader DifferentialEquations.jl ecosystem.

-

In this tutorial we'll provide an introduction to using DiffEqBiological to specify chemical reaction networks, and then to solve ODE, jump, tau-leaping and SDE models generated from them. Let's start by using the DiffEqBiological reaction_network macro to specify a simply chemical reaction network; the well-known Repressilator.

-

We first import the basic packages we'll need, and use Plots.jl for making figures:

- - -
-# If not already installed, first hit "]" within a Julia REPL. Then type:
-# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify 
-
-using DifferentialEquations, DiffEqBiological, Plots, Latexify
-pyplot(fmt=:svg);
-
- - - -

We now construct the reaction network. The basic types of arrows and predefined rate laws one can use are discussed in detail within the DiffEqBiological Chemical Reaction Models documentation. Here we use a mix of first order, zero order and repressive Hill function rate laws. Note, $\varnothing$ corresponds to the empty state, and is used for zeroth order production and first order degradation reactions:

- - -
-repressilator = @reaction_network begin
-    hillr(P₃,α,K,n),  --> m₁
-    hillr(P₁,α,K,n),  --> m₂
-    hillr(P₂,α,K,n),  --> m₃
-    (δ,γ), m₁  
-    (δ,γ), m₂  
-    (δ,γ), m₃  
-    β, m₁ --> m₁ + P₁
-    β, m₂ --> m₂ + P₂
-    β, m₃ --> m₃ + P₃
-    μ, P₁ --> 
-    μ, P₂ --> 
-    μ, P₃ --> 
-end α K n δ γ β μ;
-
- - - -

We can use Latexify to look at the corresponding reactions and understand the generated rate laws for each reaction

- - -
-latexify(repressilator; env=:chemical)
-
- - - - -\begin{align*} -\require{mhchem} -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + P_3^{n}}] m_{1}}\\ -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + P_1^{n}}] m_{2}}\\ -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + P_2^{n}}] m_{3}}\\ -\ce{ m_{1} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{2} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{3} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{1} &->[\beta] m_{1} + P_{1}}\\ -\ce{ m_{2} &->[\beta] m_{2} + P_{2}}\\ -\ce{ m_{3} &->[\beta] m_{3} + P_{3}}\\ -\ce{ P_{1} &->[\mu] \varnothing}\\ -\ce{ P_{2} &->[\mu] \varnothing}\\ -\ce{ P_{3} &->[\mu] \varnothing} -\end{align*} - - -

We can also use Latexify to look at the corresponding ODE model for the chemical system

- - -
-latexify(repressilator, cdot=false)
-
- - - - -\begin{align*} -\frac{dm₁(t)}{dt} =& \frac{\alpha K^{n}}{K^{n} + P_3^{n}} - \delta m_1 + \gamma \\ -\frac{dm₂(t)}{dt} =& \frac{\alpha K^{n}}{K^{n} + P_1^{n}} - \delta m_2 + \gamma \\ -\frac{dm₃(t)}{dt} =& \frac{\alpha K^{n}}{K^{n} + P_2^{n}} - \delta m_3 + \gamma \\ -\frac{dP₁(t)}{dt} =& \beta m_1 - \mu P_1 \\ -\frac{dP₂(t)}{dt} =& \beta m_2 - \mu P_2 \\ -\frac{dP₃(t)}{dt} =& \beta m_3 - \mu P_3 -\end{align*} - - -

To solve the ODEs we need to specify the values of the parameters in the model, the initial condition, and the time interval to solve the model on. To do this it helps to know the orderings of the parameters and the species. Parameters are ordered in the same order they appear after the end statement in the @reaction_network macro. Species are ordered in the order they first appear within the @reaction_network macro. We can see these orderings using the speciesmap and paramsmap functions:

- - -
-speciesmap(repressilator)
-
- - -
-OrderedCollections.OrderedDict{Symbol,Int64} with 6 entries:
-  :m₁ => 1
-  :m₂ => 2
-  :m₃ => 3
-  :P₁ => 4
-  :P₂ => 5
-  :P₃ => 6
-
- - - -
-paramsmap(repressilator)
-
- - -
-OrderedCollections.OrderedDict{Symbol,Int64} with 7 entries:
-  :α => 1
-  :K => 2
-  :n => 3
-  :δ => 4
-  :γ => 5
-  :β => 6
-  :μ => 7
-
- - -

Solving the ODEs:

-

Knowing these orderings, we can create parameter and initial condition vectors, and setup the ODEProblem we want to solve:

- - -
-# parameters [α,K,n,δ,γ,β,μ]
-p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60)
-
-# initial condition [m₁,m₂,m₃,P₁,P₂,P₃]
-u₀ = [0.,0.,0.,20.,0.,0.]
-
-# time interval to solve on
-tspan = (0., 10000.)
-
-# create the ODEProblem we want to solve
-oprob = ODEProblem(repressilator, u₀, tspan, p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10000.0)
-u0: [0.0, 0.0, 0.0, 20.0, 0.0, 0.0]
-
- - -

At this point we are all set to solve the ODEs. We can now use any ODE solver from within the DiffEq package. We'll just use the default DifferentialEquations solver for now, and then plot the solutions:

- - -
-sol = solve(oprob, saveat=10.)
-plot(sol, fmt=:svg)
-
- - - - -

We see the well-known oscillatory behavior of the repressilator! For more on choices of ODE solvers, see the JuliaDiffEq documentation.

-
-

Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kinetics

-

Let's now look at a stochastic chemical kinetics model of the repressilator, modeling it with jump processes. Here we will construct a DiffEqJump JumpProblem that uses Gillespie's Direct method, and then solve it to generate one realization of the jump process:

- - -
-# first we redefine the initial condition to be integer valued
-u₀ = [0,0,0,20,0,0]
-
-# next we create a discrete problem to encode that our species are integer valued:
-dprob = DiscreteProblem(repressilator, u₀, tspan, p)
-
-# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver:
-jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false))
-
-# now let's solve and plot the jump process:
-sol = solve(jprob, SSAStepper(), saveat=10.)
-plot(sol, fmt=:svg)
-
- - - - -

Here we see that oscillations remain, but become much noiser. Note, in constructing the JumpProblem we could have used any of the SSAs that are part of DiffEqJump instead of the Direct method, see the list of SSAs (i.e. constant rate jump aggregators) in the documentation.

-
-

$\tau$-leaping Methods:

-

While SSAs generate exact realizations for stochastic chemical kinetics jump process models, $\tau$-leaping methods offer a performant alternative by discretizing in time the underlying time-change representation of the stochastic process. The DiffEqJump package has limited support for $\tau$-leaping methods in the form of the basic Euler's method type approximation proposed by Gillespie. We can simulate a $\tau$-leap approximation to the repressilator by using the RegularJump representation of the network to construct a JumpProblem:

- - -
-rjs = regularjumps(repressilator)
-lprob = JumpProblem(dprob, Direct(), rjs)
-lsol = solve(lprob, SimpleTauLeaping(), dt=.1)
-plot(lsol, plotdensity=1000, fmt=:svg)
-
- - - - -
-

Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models:

-

At an intermediary physical scale between macroscopic ODE models and microscopic stochastic chemical kinetic models lies the CLE, a SDE version of the model. The SDEs add to each ODE above a noise term. As the repressilator has species that get very close to zero in size, it is not a good candidate to model with the CLE (where solutions can then go negative and become unphysical). Let's create a simpler reaction network for a birth-death process that will stay non-negative:

- - -
-bdp = @reaction_network begin
-  c₁, X --> 2X
-  c₂, X --> 0
-  c₃, 0 --> X
-end c₁ c₂ c₃
-p = (1.0,2.0,50.)
-u₀ = [5.]
-tspan = (0.,4.);
-
- - - -

The corresponding Chemical Langevin Equation SDE is then

- - -
-latexify(bdp, noise=true, cdot=false)
-
- - - - -\begin{align*} -\mathrm{dX}\left( t \right) =& \left( c_1 X - c_2 X + c_3 \right) dt + \sqrt{\left\|c_1 X\right\|} \mathrm{dW_1}\left( t \right) - \sqrt{\left\|c_2 X\right\|} \mathrm{dW_2}\left( t \right) + \sqrt{\left\|c_3\right\|} \mathrm{dW_3}\left( t \right) -\end{align*} - - -

where each $W_i(t)$ denotes an independent Brownian Motion. We can solve the CLE SDE model by creating an SDEProblem and solving it similar to what we did for ODEs above:

- - -
-# SDEProblem for CLE
-sprob = SDEProblem(bdp, u₀, tspan, p)
-
-# solve and plot, tstops is used to specify enough points 
-# that the plot looks well-resolved
-sol = solve(sprob, tstops=range(0., step=4e-3, length=1001))
-plot(sol, fmt=:svg)
-
- - - - -

We again have complete freedom to select any of the StochasticDifferentialEquations.jl SDE solvers, see the documentation.

-
-

What information can be queried from the reaction_network:

-

The generated reaction_network contains a lot of basic information. For example

-
    -
  • f=oderhsfun(repressilator) is a function f(du,u,p,t) that given the current state vector u and time t fills du with the time derivatives of u (i.e. the right hand side of the ODEs).

    -
  • -
  • jac=jacfun(repressilator) is a function jac(J,u,p,t) that evaluates and returns the Jacobian of the ODEs in J. A corresponding Jacobian matrix of expressions can be accessed using the jacobianexprs function:

    -
  • -
- - -
-latexify(jacobianexprs(repressilator), cdot=false)
-
- - - - -\begin{equation*} -\left[ -\begin{array}{cccccc} - - \delta & 0 & 0 & 0 & 0 & \frac{ - K^{n} n \alpha P_3^{-1 + n}}{\left( K^{n} + P_3^{n} \right)^{2}} \\ -0 & - \delta & 0 & \frac{ - K^{n} n \alpha P_1^{-1 + n}}{\left( K^{n} + P_1^{n} \right)^{2}} & 0 & 0 \\ -0 & 0 & - \delta & 0 & \frac{ - K^{n} n \alpha P_2^{-1 + n}}{\left( K^{n} + P_2^{n} \right)^{2}} & 0 \\ -\beta & 0 & 0 & - \mu & 0 & 0 \\ -0 & \beta & 0 & 0 & - \mu & 0 \\ -0 & 0 & \beta & 0 & 0 & - \mu \\ -\end{array} -\right] -\end{equation*} - - -
    -
  • pjac = paramjacfun(repressilator) is a function pjac(pJ,u,p,t) that evaluates and returns the Jacobian, pJ, of the ODEs with respect to the parameters. This allows reaction_networks to be used in the DifferentialEquations.jl local sensitivity analysis package DiffEqSensitivity.

    -
  • -
-

By default, generated ODEProblems will be passed the corresponding Jacobian function, which will then be used within implicit ODE/SDE methods.

-

The DiffEqBiological API documentation provides a thorough description of the many query functions that are provided to access network properties and generated functions. In DiffEqBiological Tutorial II we'll explore the API.

-
-

Getting Help

-

Have a question related to DiffEqBiological or this tutorial? Feel free to ask in the DifferentialEquations.jl Gitter. If you think you've found a bug in DiffEqBiological, or would like to request/discuss new functionality, feel free to open an issue on Github (but please check there is no related issue already open). If you've found a bug in this tutorial, or have a suggestion, feel free to open an issue on the DiffEqTutorials Github site. Or, submit a pull request to DiffEqTutorials updating the tutorial!

-
- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","03-diffeqbio_I_introduction.jmd")
-
-

Computer Information:

-
-
Julia Version 1.2.0
-Commit c6da87ff4b (2019-08-20 00:03 UTC)
-Platform Info:
-  OS: macOS (x86_64-apple-darwin18.6.0)
-  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.2/Project.toml`
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.3
-[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.19.4
-[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 6.3.4
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 4.0.1
-[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.2.2
-[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.5.1
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.8.0
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.20.0
-[42fd0dbc-a981-5370-80f2-aaf504508153] IterativeSolvers 0.8.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.11.0
-[54ca160b-1b9f-5127-a996-1867f4bc2a2c] ODEInterface 0.4.6
-[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.3.0
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.17.2
-[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.13.0
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.27.0
-[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.91.2
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.2
-[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.5
-[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 2.2.0
-[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.11.2
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.7.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[d6f4376e-aef5-505a-96c1-9c027394607a] Markdown
-
- - - -
- - - -
-
-
- - diff --git a/html/models/04-diffeqbio_II_networkproperties.html b/html/models/04-diffeqbio_II_networkproperties.html deleted file mode 100644 index a76cded8..00000000 --- a/html/models/04-diffeqbio_II_networkproperties.html +++ /dev/null @@ -1,1448 +0,0 @@ - - - - - - DiffEqBiological Tutorial II: Network Properties API - - - - - - - - - - - - - - - - - -
-
-
- -
-

DiffEqBiological Tutorial II: Network Properties API

-
Samuel Isaacson
- -
- -

The DiffEqBiological API provides a collection of functions for easily accessing network properties, and for incrementally building and extending a network. In this tutorial we'll go through the API, and then illustrate how to programmatically construct a network.

-

We'll illustrate the API using a toggle-switch like network that contains a variety of different reaction types:

- - -
-using DifferentialEquations, DiffEqBiological, Latexify, Plots
-fmt = :svg
-pyplot(fmt=fmt)
-rn = @reaction_network begin
-    hillr(D₂,α,K,n),  --> m₁
-    hillr(D₁,α,K,n),  --> m₂
-    (δ,γ), m₁  
-    (δ,γ), m₂  
-    β, m₁ --> m₁ + P₁
-    β, m₂ --> m₂ + P₂
-    μ, P₁ --> 
-    μ, P₂ --> 
-    (k₊,k₋), 2P₁  D₁ 
-    (k₊,k₋), 2P₂  D₂
-    (k₊,k₋), P₁+P₂  T
-end α K n δ γ β μ k₊ k₋;
-
- - - -

This corresponds to the chemical reaction network given by

- - -
-latexify(rn; env=:chemical)
-
- - - - -\begin{align*} -\require{mhchem} -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\ -\ce{ \varnothing &->[\frac{\alpha \cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\ -\ce{ m_{1} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{2} &<=>[\delta][\gamma] \varnothing}\\ -\ce{ m_{1} &->[\beta] m_{1} + P_{1}}\\ -\ce{ m_{2} &->[\beta] m_{2} + P_{2}}\\ -\ce{ P_{1} &->[\mu] \varnothing}\\ -\ce{ P_{2} &->[\mu] \varnothing}\\ -\ce{ 2 \cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\ -\ce{ 2 \cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\ -\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T} -\end{align*} - - -
-

Network Properties

-

Basic properties of the generated network include the speciesmap and paramsmap functions we examined in the last tutorial, along with the corresponding species and params functions:

- - -
-species(rn)
-
- - -
-7-element Array{Symbol,1}:
- :m₁
- :m₂
- :P₁
- :P₂
- :D₁
- :D₂
- :T
-
- - - -
-params(rn)
-
- - -
-9-element Array{Symbol,1}:
- :α 
- :K 
- :n 
- :δ 
- :γ 
- :β 
- :μ 
- :k₊
- :k₋
-
- - -

The numbers of species, parameters and reactions can be accessed using numspecies(rn), numparams(rn) and numreactions(rn).

-

A number of functions are available to access properties of reactions within the generated network, including substrates, products, dependents, ismassaction, substratestoich, substratesymstoich, productstoich, productsymstoich, and netstoich. Each of these functions takes two arguments, the reaction network rn and the index of the reaction to query information about. For example, to find the substrate symbols and their corresponding stoichiometries for the 11th reaction, 2P₁ --> D₁, we would use

- - -
-substratesymstoich(rn, 11)
-
- - -
-1-element Array{DiffEqBiological.ReactantStruct,1}:
- DiffEqBiological.ReactantStruct(:P₁, 2)
-
- - -

Broadcasting works on all these functions, allowing the construction of a vector holding the queried information across all reactions, i.e.

- - -
-substratesymstoich.(rn, 1:numreactions(rn))
-
- - -
-16-element Array{Array{DiffEqBiological.ReactantStruct,1},1}:
- []                                              
- []                                              
- [ReactantStruct(:m₁, 1)]                        
- []                                              
- [ReactantStruct(:m₂, 1)]                        
- []                                              
- [ReactantStruct(:m₁, 1)]                        
- [ReactantStruct(:m₂, 1)]                        
- [ReactantStruct(:P₁, 1)]                        
- [ReactantStruct(:P₂, 1)]                        
- [ReactantStruct(:P₁, 2)]                        
- [ReactantStruct(:D₁, 1)]                        
- [ReactantStruct(:P₂, 2)]                        
- [ReactantStruct(:D₂, 1)]                        
- [ReactantStruct(:P₁, 1), ReactantStruct(:P₂, 1)]
- [ReactantStruct(:T, 1)]
-
- - -

To see the net stoichiometries for all reactions we would use

- - -
-netstoich.(rn, 1:numreactions(rn))
-
- - -
-16-element Array{Array{Pair{Int64,Int64},1},1}:
- [1=>1]              
- [2=>1]              
- [1=>-1]             
- [1=>1]              
- [2=>-1]             
- [2=>1]              
- [3=>1]              
- [4=>1]              
- [3=>-1]             
- [4=>-1]             
- [3=>-2, 5=>1]       
- [3=>2, 5=>-1]       
- [4=>-2, 6=>1]       
- [4=>2, 6=>-1]       
- [3=>-1, 4=>-1, 7=>1]
- [3=>1, 4=>1, 7=>-1]
-
- - -

Here the first integer in each pair corresponds to the index of the species (with symbol species(rn)[index]). The second integer corresponds to the net stoichiometric coefficient of the species within the reaction. substratestoich and productstoich are defined similarly.

-

Several functions are also provided that calculate different types of dependency graphs. These include rxtospecies_depgraph, which provides a mapping from reaction index to the indices of species whose population changes when the reaction occurs:

- - -
-rxtospecies_depgraph(rn)
-
- - -
-16-element Array{Array{Int64,1},1}:
- [1]      
- [2]      
- [1]      
- [1]      
- [2]      
- [2]      
- [3]      
- [4]      
- [3]      
- [4]      
- [3, 5]   
- [3, 5]   
- [4, 6]   
- [4, 6]   
- [3, 4, 7]
- [3, 4, 7]
-
- - -

Here the last row indicates that the species with indices [3,4,7] will change values when the reaction T --> P₁ + P₂ occurs. To confirm these are the correct species we can look at

- - -
-species(rn)[[3,4,7]]
-
- - -
-3-element Array{Symbol,1}:
- :P₁
- :P₂
- :T
-
- - -

The speciestorx_depgraph similarly provides a mapping from species to reactions for which their rate laws depend on that species. These correspond to all reactions for which the given species is in the dependent set of the reaction. We can verify this for the first species, m₁:

- - -
-speciestorx_depgraph(rn)[1]
-
- - -
-2-element Array{Int64,1}:
- 3
- 7
-
- - - -
-findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn)))
-
- - -
-2-element Array{Int64,1}:
- 3
- 7
-
- - -

Finally, rxtorx_depgraph provides a mapping that shows when a given reaction occurs, which other reactions have rate laws that involve species whose value would have changed:

- - -
-rxtorx_depgraph(rn)
-
- - -
-16-element Array{Array{Int64,1},1}:
- [1, 3, 7]              
- [2, 5, 8]              
- [3, 7]                 
- [3, 4, 7]              
- [5, 8]                 
- [5, 6, 8]              
- [7, 9, 11, 15]         
- [8, 10, 13, 15]        
- [9, 11, 15]            
- [10, 13, 15]           
- [2, 9, 11, 12, 15]     
- [2, 9, 11, 12, 15]     
- [1, 10, 13, 14, 15]    
- [1, 10, 13, 14, 15]    
- [9, 10, 11, 13, 15, 16]
- [9, 10, 11, 13, 15, 16]
-
- - -

Note on Using Network Property API Functions

-

Many basic network query and reaction property functions are simply accessors, returning information that is already stored within the generated reaction_network. For these functions, modifying the returned data structures may lead to inconsistent internal state within the network. As such, they should be used for accessing, but not modifying, network properties. The API documentation indicates which functions return newly allocated data structures and which return data stored within the reaction_network.

-
-

Incremental Construction of Networks

-

The @reaction_network macro is monolithic, in that it not only constructs and stores basic network properties such as the reaction stoichiometries, but also generates everything needed to immediately solve ODE, SDE and jump models using the network. This includes Jacobian functions, noise functions, and jump functions for each reaction. While this allows for a compact interface to the DifferentialEquations.jl solvers, it can also be computationally expensive for large networks, where a user may only wish to solve one type of problem and/or have fine-grained control over what is generated. In addition, some types of reaction network structures are more amenable to being constructed programmatically, as opposed to writing out all reactions by hand within one macro. For these reasons DiffEqBiological provides two additional macros that only initially setup basic reaction network properties, and which can be extended through a programmatic interface: @min_reaction_network and @empty_reaction_network. We now give an introduction to constructing these more minimal network representations, and how they can be programmatically extended. See also the relevant API section.

-

The @min_reaction_network macro works identically to the @reaction_network macro, but the generated network will only be complete with respect to its representation of chemical network properties (i.e. species, parameters and reactions). No ODE, SDE or jump models are generated during the macro call. It can subsequently be extended with the addition of new species, parameters or reactions. The @empty_reaction_network allocates an empty network structure that can also be extended using the programmatic interface. For example, consider a partial version of the toggle-switch like network we defined above:

- - -
-rnmin = @min_reaction_network begin
-    (δ,γ), m₁  
-    (δ,γ), m₂  
-    β, m₁ --> m₁ + P₁
-    β, m₂ --> m₂ + P₂
-    μ, P₁ --> 
-    μ, P₂ --> 
-end δ γ β μ;
-
- - - -

Here we have left out the first two, and last three, reactions from the original reaction_network. To expand the network until it is functionally equivalent to the original model we add back in the missing species, parameters, and finally the missing reactions. Note, it is required that species and parameters be defined before any reactions using them are added. The necessary network extension functions are given by addspecies!, addparam! and addreaction!, and described in the API. To complete rnmin we first add the relevant species:

- - -
-addspecies!(rnmin, :D₁)
-addspecies!(rnmin, :D₂)
-addspecies!(rnmin, :T)
-
- - - -

Next we add the needed parameters

- - -
-addparam!(rnmin, )
-addparam!(rnmin, :K)
-addparam!(rnmin, :n)
-addparam!(rnmin, :k₊)
-addparam!(rnmin, :k₋)
-
- - - -

Note, both addspecies! and addparam! also accept strings encoding the variable names (which are then converted to Symbols internally).

-

We are now ready to add the missing reactions. The API provides two forms of the addreaction! function, one takes expressions analogous to what one would write in the macro:

- - -
-addreaction!(rnmin, :(hillr(D₁,α,K,n)), :( --> m₂))
-addreaction!(rnmin, :((k₊,k₋)), :(2P₂  D₂))
-addreaction!(rnmin, :k₊, :(2P₁ --> D₁))
-addreaction!(rnmin, :k₋, :(D₁ --> 2P₁))
-
- - - -

The rate can be an expression or symbol as above, but can also just be a numeric value. The second form of addreaction! takes tuples of Pair{Symbol,Int} that encode the stoichiometric coefficients of substrates and reactants:

- - -
-# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich)
-addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,))
-addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,))
-addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1))
-
- - - -

Let's check that rn and rnmin have the same set of species:

- - -
-setdiff(species(rn), species(rnmin))
-
- - -
-0-element Array{Symbol,1}
-
- - -

the same set of params:

- - -
-setdiff(params(rn), params(rnmin))
-
- - -
-0-element Array{Symbol,1}
-
- - -

and the final reaction has the same substrates, reactions, and rate expression:

- - -
-rxidx = numreactions(rn)
-setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx))
-
- - -
-0-element Array{Symbol,1}
-
- - - -
-setdiff(products(rn, rxidx), products(rnmin, rxidx))
-
- - -
-0-element Array{Symbol,1}
-
- - - -
-rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx)
-
- - -
-true
-
- - -
-

Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps

-

Once a network generated from @min_reaction_network or @empty_reaction_network has had all the associated species, parameters and reactions filled in, corresponding ODE, SDE or jump models can be constructed. The relevant API functions are addodes!, addsdes! and addjumps!. One benefit to contructing models with these functions is that they offer more fine-grained control over what actually gets constructed. For example, addodes! has the optional keyword argument, build_jac, which if set to false will disable construction of symbolic Jacobians and functions for evaluating Jacobians. For large networks this can give a significant speed-up in the time required for constructing an ODE model. Each function and its associated keyword arguments are described in the API section, Functions to add ODEs, SDEs or Jumps to a Network.

-

Let's extend rnmin to include the needed functions for use in ODE solvers:

- - -
-addodes!(rnmin)
-
- - - -

The Generated Functions for Models section of the API shows what functions have been generated. For ODEs these include oderhsfun(rnmin), which returns a function of the form f(du,u,p,t) which evaluates the ODEs (i.e. the time derivatives of u) within du. For each generated function, the corresponding expressions from which it was generated can be retrieved using accessors from the Generated Expressions section of the API. The equations within du can be retrieved using the odeexprs(rnmin) function. For example:

- - -
-odeexprs(rnmin)
-
- - -
-7-element Array{Union{Float64, Int64, Expr, Symbol},1}:
- :((-(δ * m₁) + γ) + (α * K ^ n) / (K ^ n + D₂ ^ n))                       
-               
- :((-(δ * m₂) + γ) + (α * K ^ n) / (K ^ n + D₁ ^ n))                       
-               
- :(((((β * m₁ - μ * P₁) + -2 * (k₊ / 2) * P₁ ^ 2) + 2 * k₋ * D₁) - k₊ * P₁ 
-* P₂) + k₋ * T)
- :(((((β * m₂ - μ * P₂) + -2 * (k₊ / 2) * P₂ ^ 2) + 2 * k₋ * D₂) - k₊ * P₁ 
-* P₂) + k₋ * T)
- :((k₊ / 2) * P₁ ^ 2 - k₋ * D₁)                                            
-               
- :((k₊ / 2) * P₂ ^ 2 - k₋ * D₂)                                            
-               
- :(k₊ * P₁ * P₂ - k₋ * T)
-
- - -

Using Latexify we can see the ODEs themselves to compare with these expressions:

- - -
-latexify(rnmin)
-
- - - - -\begin{align*} -\frac{dm_1}{dt} =& - \delta \cdot m_1 + \gamma + \frac{\alpha \cdot K^{n}}{K^{n} + D_2^{n}} \\ -\frac{dm_2}{dt} =& - \delta \cdot m_2 + \gamma + \frac{\alpha \cdot K^{n}}{K^{n} + D_1^{n}} \\ -\frac{dP_1}{dt} =& \beta \cdot m_1 - \mu \cdot P_1 -2 \cdot \frac{k_+}{2} \cdot P_1^{2} + 2 \cdot k_- \cdot D_1 - k_+ \cdot P_1 \cdot P_2 + k_- \cdot T \\ -\frac{dP_2}{dt} =& \beta \cdot m_2 - \mu \cdot P_2 -2 \cdot \frac{k_+}{2} \cdot P_2^{2} + 2 \cdot k_- \cdot D_2 - k_+ \cdot P_1 \cdot P_2 + k_- \cdot T \\ -\frac{dD_1}{dt} =& \frac{k_+}{2} \cdot P_1^{2} - k_- \cdot D_1 \\ -\frac{dD_2}{dt} =& \frac{k_+}{2} \cdot P_2^{2} - k_- \cdot D_2 \\ -\frac{dT}{dt} =& k_+ \cdot P_1 \cdot P_2 - k_- \cdot T \\ -\end{align*} - - -

For ODEs two other functions are generated by addodes!. jacfun(rnmin) will return the generated Jacobian evaluation function, fjac(dJ,u,p,t), which given the current solution u evaluates the Jacobian within dJ. jacobianexprs(rnmin) gives the corresponding matrix of expressions, which can be used with Latexify to see the Jacobian:

- - -
-latexify(jacobianexprs(rnmin))
-
- - - - -\begin{equation*} -\left[ -\begin{array}{ccccccc} - - \delta & 0 & 0 & 0 & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot D_2^{-1 + n}}{\left( K^{n} + D_2^{n} \right)^{2}} & 0 \\ -0 & - \delta & 0 & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot D_1^{-1 + n}}{\left( K^{n} + D_1^{n} \right)^{2}} & 0 & 0 \\ -\beta & 0 & - \mu - 2 \cdot k_+ \cdot P_1 - k_+ \cdot P_2 & - k_+ \cdot P_1 & 2 \cdot k_- & 0 & k_{-} \\ -0 & \beta & - k_+ \cdot P_2 & - \mu - 2 \cdot k_+ \cdot P_2 - k_+ \cdot P_1 & 0 & 2 \cdot k_- & k_{-} \\ -0 & 0 & k_+ \cdot P_1 & 0 & - k_- & 0 & 0 \\ -0 & 0 & 0 & k_+ \cdot P_2 & 0 & - k_- & 0 \\ -0 & 0 & k_+ \cdot P_2 & k_+ \cdot P_1 & 0 & 0 & - k_- \\ -\end{array} -\right] -\end{equation*} - - -

addodes! also generates a function that evaluates the Jacobian of the ODE derivative functions with respect to the parameters. paramjacfun(rnmin) then returns the generated function. It has the form fpjac(dPJ,u,p,t), which given the current solution u evaluates the Jacobian matrix with respect to parameters p within dPJ. For use in DifferentialEquations.jl solvers, an ODEFunction representation of the ODEs is available from odefun(rnmin).

-

addsdes! and addjumps! work similarly to complete the network for use in StochasticDiffEq and DiffEqJump solvers.

-

Note on Using Generated Function and Expression API Functions

-

The generated functions and expressions accessible through the API require first calling the appropriate addodes!, addsdes or addjumps function. These are responsible for actually constructing the underlying functions and expressions. The API accessors simply return already constructed functions and expressions that are stored within the reaction_network structure.

-
-

Example of Generating a Network Programmatically

-

For a user directly typing in a reaction network, it is generally easier to use the @min_reaction_network or @reaction_network macros to fully specify reactions. However, for large, structured networks it can be much easier to generate the network programmatically. For very large networks, with tens of thousands of reactions, the form of addreaction! that uses stoichiometric coefficients should be preferred as it offers substantially better performance. To put together everything we've seen, let's generate the network corresponding to a 1D continuous time random walk, approximating the diffusion of molecules within an interval.

-

The basic "reaction" network we wish to study is

-

\[ -u_1 \leftrightarrows u_2 \leftrightarrows u_3 \cdots \leftrightarrows u_{N} -\]

-

for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll assume the rate molecules hop from their current site to any particular neighbor is just $h^{-2}$. We can interpret this hopping process as a collection of $2N-2$ "reactions", with the form $u_i \to u_j$ for $j=i+1$ or $j=i-1$. We construct the corresponding reaction network as follows. First we set values for the basic parameters:

- - -
-N = 64
-h = 1 / N
-
- - -
-0.015625
-
- - -

then we create an empty network, and add each species

- - -
-rn = @empty_reaction_network
-
-for i = 1:N
-    addspecies!(rn, Symbol(:u, i))
-end
-
- - - -

We next add one parameter β, which we will set equal to the hopping rate of molecules, $h^{-2}$:

- - -
-addparam!(rn, )
-
- - - -

Finally, we add in the $2N-2$ possible hopping reactions:

- - -
-for i = 1:N
-    (i < N) && addreaction!(rn, , (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,))
-    (i > 1) && addreaction!(rn, , (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,))
-end
-
- - - -

Let's first construct an ODE model for the network

- - -
-addodes!(rn)
-
- - - -

We now need to specify the initial condition, parameter vector and time interval to solve on. We start with 10000 molecules placed at the center of the domain, and setup an ODEProblem to solve:

- - -
-u₀ = zeros(N)
-u₀[div(N,2)] = 10000
-p = [1/(h*h)]
-tspan = (0.,.01)
-oprob = ODEProblem(rn, u₀, tspan, p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 0.01)
-u0: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0  …  0.0, 0.0, 0.0, 0.
-0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
-
- - -

We are now ready to solve the problem and plot the solution. Since we have essentially generated a method of lines discretization of the diffusion equation with a discontinuous initial condition, we'll use an A-L stable implicit ODE solver, Rodas5, and plot the solution at a few times:

- - -
-sol = solve(oprob, Rodas5())
-times = [0., .0001, .001, .01]
-plt = plot()
-for time in times
-    plot!(plt, 1:N, sol(time), fmt=fmt, xlabel="i", ylabel="uᵢ", label=string("t = ", time), lw=3)
-end
-plot(plt, ylims=(0.,10000.))
-
- - - - -

Here we see the characteristic diffusion of molecules from the center of the domain, resulting in a shortening and widening of the solution as $t$ increases.

-

Let's now look at a stochastic chemical kinetics jump process version of the model, where β gives the probability per time each molecule can hop from its current lattice site to an individual neighboring site. We first add in the jumps, disabling regular_jumps since they are not needed, and using the minimal_jumps flag to construct a minimal representation of the needed jumps. We then construct a JumpProblem, and use the Composition-Rejection Direct method, DirectCR, to simulate the process of the molecules hopping about on the lattice:

- - -
-addjumps!(rn, build_regular_jumps=false, minimal_jumps=true)
-
-# make the initial condition integer valued 
-u₀ = zeros(Int, N)
-u₀[div(N,2)] = 10000
-
-# setup and solve the problem
-dprob = DiscreteProblem(rn, u₀, tspan, p)
-jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))
-jsol = solve(jprob, SSAStepper(), saveat=times)
-
- - -
-retcode: Default
-Interpolation: Piecewise constant interpolation
-t: 4-element Array{Float64,1}:
- 0.0   
- 0.0001
- 0.001 
- 0.01  
-u: 4-element Array{Array{Int64,1},1}:
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
- [3, 4, 2, 3, 12, 6, 5, 17, 21, 22  …  19, 13, 10, 9, 5, 5, 4, 1, 0, 0]
-
- - -

We can now plot bar graphs showing the locations of the molecules at the same set of times we examined the ODE solution. For comparison, we also plot the corresponding ODE solutions (red lines) that we found:

- - -
-times = [0., .0001, .001, .01]
-plts = []
-for i = 1:4
-    b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i]))
-    plot!(b,sol(times[i]))
-    push!(plts,b)
-end
-plot(plts...)
-
- - - - -

Similar to the ODE solutions, we see that the molecules spread out and become more and more well-mixed throughout the domain as $t$ increases. The simulation results are noisy due to the finite numbers of molecules present in the stochsatic simulation, but since the number of molecules is large they agree well with the ODE solution at each time.

-
-

Getting Help

-

Have a question related to DiffEqBiological or this tutorial? Feel free to ask in the DifferentialEquations.jl Gitter. If you think you've found a bug in DiffEqBiological, or would like to request/discuss new functionality, feel free to open an issue on Github (but please check there is no related issue already open). If you've found a bug in this tutorial, or have a suggestion, feel free to open an issue on the DiffEqTutorials Github site. Or, submit a pull request to DiffEqTutorials updating the tutorial!

-
- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","04-diffeqbio_II_networkproperties.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: macOS (x86_64-apple-darwin15.6.0)
-  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[14f7f29c-3bd6-536c-9a0b-7339e30b5a3e] AMD 0.3.0
-[28f2ccd6-bb30-5033-b560-165f7b14dc2f] ApproxFun 0.11.3
-[c52e3926-4ff0-5f6e-af25-54175e0327b1] Atom 0.8.5
-[aae01518-5342-5314-be14-df237901396f] BandedMatrices 0.9.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[ad839575-38b3-5650-b840-f874b8c74a25] Blink 0.10.1
-[336ed68f-0bac-5ca0-87d4-7b16caf5d00b] CSV 0.5.11
-[5d742f6a-9f54-50ce-8119-2520741973ca] CSVFiles 0.15.0
-[159f3aea-2a34-519c-b102-8c37f9878175] Cairo 0.5.6
-[3da002f7-5984-5a60-b8a6-cbb66c0b333f] ColorTypes 0.8.0
-[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.19.2
-[864edb3b-99cc-5e75-8d2d-829cb0a9cfe8] DataStructures 0.17.0
-[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 5.20.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.9.0
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.14.0
-[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.2.0
-[78ddff82-25fc-5f2b-89aa-309469cbf16f] DiffEqMonteCarlo 0.15.1
-[9fdde737-9c7f-55bf-ade8-46b3f136cc48] DiffEqOperators 4.1.0
-[34035eb4-37db-58ae-b003-a3202c898701] DiffEqPDEBase 0.4.0
-[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.5.1
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.6.0
-[aaf54ef3-cdf8-58ed-94cc-d582ad619b94] DistributedArrays 0.6.3
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.21.1
-[e30172f5-a6a5-5a46-863b-614d45cd2de4] Documenter 0.23.2
-[5789e2e9-d7fb-5bc7-8068-2c6fae9b9549] FileIO 1.0.7
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[069b7b12-0de2-55c6-9aab-29f3d0a68a2e] FunctionWrappers 1.0.0
-[28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71] GR 0.41.0
-[14197337-ba66-59df-a3e3-ca00e7dcff7a] GenericLinearAlgebra 0.1.0
-[4c0ca9eb-093a-5379-98c5-f87ac0bbbf44] Gtk 0.17.0
-[19dc6840-f33b-545b-b366-655c7e3ffd49] HCubature 1.4.0
-[f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f] HDF5 0.12.0
-[cd3eb016-35fb-5094-929b-558a96fad6f3] HTTP 0.7.1
-[09f84164-cd44-5f33-b23f-e6b0d136a0d5] HypothesisTests 0.8.0
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.19.0
-[42fd0dbc-a981-5370-80f2-aaf504508153] IterativeSolvers 0.8.1
-[30d91d44-8115-11e8-1d28-c19a5ac16de8] JuAFEM 0.2.0
-[f80590ac-b429-510a-8a99-e7c46989f22d] JuliaFEM 0.5.0
-[aa1ae85d-cabe-5617-a682-6adf51b2e16a] JuliaInterpreter 0.5.2
-[e5e0dc1b-0480-54bc-9374-aad01c23163d] Juno 0.7.2
-[0b1a1467-8014-51b9-945f-bf0ae24f4b77] KrylovKit 0.3.4
-[b964fa9f-0449-5b57-a5c2-d3ea65f4040f] LaTeXStrings 1.0.3
-[2b0e0bc5-e4fd-59b4-8912-456d1b03d8d7] LanguageServer 0.6.0
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[5078a376-72f3-5289-bfd5-ec5146d43c02] LazyArrays 0.9.1
-[093fc24a-ae57-5d10-9952-331d41423f4d] LightGraphs 1.2.0
-[7a12625a-238d-50fd-b39a-03d52299707e] LinearMaps 2.5.0
-[23992714-dd62-5051-b70f-ba57cb901cac] MAT 0.5.0
-[1914dd2f-81c6-5fcd-8719-6d5c9610ff09] MacroTools 0.5.1
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.6.4
-[46d2c3a1-f734-5fdb-9937-b9b9aeba4221] MuladdMacro 0.2.1
-[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.2.1
-[510215fc-4207-5dde-b226-833fc4488ee2] Observables 0.2.3
-[5fb14364-9ced-5910-84b2-373655c76a03] OhMyREPL 0.5.1
-[bac558e1-5e72-5ebc-8fee-abe8a469f55d] OrderedCollections 1.1.0
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.14.0
-[3b7a836e-365b-5785-a47d-02c71176b4aa] PGFPlots 3.1.3
-[9b87118b-4619-50d2-8e1e-99f35a4d4d9d] PackageCompiler 0.6.4
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.2.1
-[d96e819e-fc66-5662-9728-84c9c7592b0a] Parameters 0.11.0
-[995b91a9-d308-5afd-9ec6-746e21dbc043] PlotUtils 0.5.8
-[58dd65bb-95f3-509e-9936-c39a10fdeae7] Plotly 0.2.0
-[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.12.5
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.26.1
-[f27b6e38-b328-58d1-80ce-0feddd5e7a45] Polynomials 0.5.2
-[27ebfcd6-29c5-5fa9-bf4b-fb8fc14df3ae] Primes 0.4.0
-[c46f51b8-102a-5cf2-8d2c-8597cb0e0da7] ProfileView 0.4.1
-[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.91.2
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[1fd47b50-473d-5c70-9696-f719f8f3bcdc] QuadGK 2.0.3
-[e6cf234a-135c-5ec9-84dd-332b85af5143] RandomNumbers 1.3.0
-[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.5
-[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 2.1.6
-[c4c386cf-5103-5370-be45-f3a111cca3b8] Rsvg 0.2.3
-[276daf66-3868-5448-9aa4-cd146d93841b] SpecialFunctions 0.7.2
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91] StatsBase 0.32.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.10.2
-[9672c7b4-1e72-59bd-8a11-6ac3964bc41f] SteadyStateDiffEq 1.5.0
-[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.8.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[123dc426-2d89-5057-bbad-38513e3affd8] SymEngine 0.7.0
-[e0df1984-e451-5cb5-8b61-797a481e67e3] TextParse 0.9.1
-[a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f] TimerOutputs 0.5.0
-[37b6cedf-1f77-55f8-9503-c64b63398394] Traceur 0.3.0
-[28d57a85-8fef-5791-bfe6-a80928e7c999] Transducers 0.3.1
-[39424ebd-4cf3-5550-a685-96706a953f40] TreeView 0.3.1
-[b8865327-cd53-5732-bb35-84acbb429228] UnicodePlots 1.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.16.0
-[2a06ce6d-1589-592b-9c33-f37faeaed826] UnitfulPlots 0.0.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
-[0f1e0344-ec1d-5b48-a673-e5cf874b6c29] WebIO 0.8.9
-[9abbd945-dff8-562f-b5e8-e1ebf5ef1b79] Profile
-[2f01184e-e22b-5df5-ae63-d93ebab69eaf] SparseArrays
-
- - - -
- - - -
-
-
- - diff --git a/html/models/04b-diffeqbio_III_steadystates.html b/html/models/04b-diffeqbio_III_steadystates.html deleted file mode 100644 index 7bba7050..00000000 --- a/html/models/04b-diffeqbio_III_steadystates.html +++ /dev/null @@ -1,954 +0,0 @@ - - - - - - DiffEqBiological Tutorial III: Steady-States and Bifurcations - - - - - - - - - - - - - - - - - -
-
-
- -
-

DiffEqBiological Tutorial III: Steady-States and Bifurcations

-
Torkel Loman and Samuel Isaacson
- -
- -

Several types of steady state analysis can be performed for networks defined with DiffEqBiological by utilizing homotopy continuation. This allows for finding the steady states and bifurcations within a large class of systems. In this tutorial we'll go through several examples of using this functionality.

-

We start by loading the necessary packages:

- - -
-using DiffEqBiological, Plots
-gr(); default(fmt = :png);
-
- - - -

Steady states and stability of a biochemical reaction network.

-

Bistable switches are well known biological motifs, characterised by the presence of two different stable steady states.

- - -
-bistable_switch = @reaction_network begin
-    d,    (X,Y)  
-    hillR(Y,v1,K1,n1),   X
-    hillR(X,v2,K2,n2),   Y
-end d v1 K1 n1 v2 K2 n2
-d = 0.01;
-v1 = 1.5; K1 = 30; n1 = 3;
-v2 = 1.; K2 = 30; n2 = 3;
-bistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2];
-
- - - -

The steady states can be found using the steady_states function (which takes a reaction network and a set of parameter values as input). The stability of these steady states can be found using the stability function.

- - -
-ss = steady_states(bistable_switch, bistable_switch_p)
-
- - -
-3-element Array{Array{Float64,1},1}:
- [31.322504001213243, 46.769050724087236]
- [3.970283396636649, 99.76874280256095]  
- [149.9972223365578, 0.7936945352275889]
-
- - - -
-stability(ss,bistable_switch, bistable_switch_p)
-
- - -
-3-element Array{Bool,1}:
- 0
- 1
- 1
-
- - -

Since the equilibration methodology is based on homotopy continuation, it is not able to handle systems with non-integer exponents, or non polynomial reaction rates. Neither of the following two systems will work.

-

This system contains a non-integer exponent:

- - -
-rn1 = @reaction_network begin
-    p,   X
-    hill(X,v,K,n), X  
-end p v K n
-p1 = [1.,2.5,1.5,1.5]
-steady_states(rn1,p1)
-
- - -
-ERROR: MethodError: no method matching ^(::DynamicPolynomials.PolyVar{true}, ::Float64)
-Closest candidates are:
-  ^(!Matched::Missing, ::Number) at missing.jl:94
-  ^(!Matched::Float64, ::Float64) at math.jl:781
-  ^(!Matched::Irrational{:ℯ}, ::Number) at mathconstants.jl:91
-  ...
-
- - -

This system contains a logarithmic reaction rate:

- - -
-rn2 = @reaction_network begin
-    p,   X
-    log(X), X  
-end p
-p2 = [1.]
-steady_states(rn2,p2)
-
- - -
-ERROR: This reaction network does not correspond to a polynomial system. Some of the reaction rate must contain non polynomial terms.
-
- - -

Bifurcation diagrams for biochemical reaction networks

-

Bifurcation diagrams illustrate how the steady states of a system depend on one or more parameters. They can be computed with the bifurcations function. It takes the same arguments as steady_states, with the addition of the parameter one wants to vary, and an interval over which to vary it:

- - -
-bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.))
-plot(bif,ylabel="[X]",label="")
-plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
-
- - - - -

The values for the second variable in the system can also be displayed, by giving that as an additional input to plot (it is the second argument, directly after the bifurcation diagram object):

- - -
-plot(bif,2,ylabel="[Y]")
-plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
-
- - - - -

The plot function also accepts all other arguments which the Plots.jl plot function accepts.

- - -
-bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.))
-plot(bif,linewidth=1.,title="A bifurcation diagram",ylabel="Steady State concentration")
-plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
-
- - - - -

Certain parameters, like n1, cannot be sensibly varied over a continuous interval. Instead, a discrete bifurcation diagram can be calculated with the bifurcation_grid function. Instead of an interval, the last argument is a range of numbers:

- - -
-bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.)
-plot(bif)
-scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
-
- - - - -

Bifurcation diagrams over two dimensions

-

In addition to the bifurcation diagrams illustrated above, where only a single variable is varied, it is also possible to investigate the steady state properties of s system as two different parameters are varied. Due to the nature of the underlying bifurcation algorithm it is not possible to continuously vary both parameters. Instead, a set of discrete values are selected for the first parameter, and a continuous interval for the second. Next, for each discrete value of the first parameter, a normal bifurcation diagram is created over the interval given for the second parameter.

- - -
-bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.))
-plot(bif)
-plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
-
- - - - -

In the single variable case we could use a bifurcation_grid to investigate the behavior of a parameter which could only attain discrete values. In the same way, if we are interested in two parameters, both of which require integer values, we can use bifrucation_grid_2d. In our case, this is required if we want to vary both the parameters n1 and n2:

- - -
-bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.)
-plot(bif)
-scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
-
- - - - -

The Brusselator

-

The Brusselator is a well know reaction network, which may or may not oscillate, depending on parameter values.

- - -
-brusselator = @reaction_network begin
-    A,   X
-    1, 2X + Y  3X
-    B, X  Y
-    1, X  
-end A B;
-A = 0.5; B = 4.;
-brusselator_p = [A, B];
-
- - - -

The system has only one steady state, for $(X,Y)=(A,B/A)$ This fixed point becomes unstable when $B > 1+A^2$, leading to oscillations. Bifurcation diagrams can be used to determine the system's stability, and hence look for where oscillations might appear in the Brusselator:

- - -
-bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5))
-plot(bif,2)
-plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"])
-
- - - - -

Here red and yellow colors label unstable steady-states, while blue and cyan label stable steady-states. (In addition, yellow and cyan correspond to points where at least one eigenvalue of the Jacobian is imaginary, while red and blue correspond to points with real-valued eigenvalues.)

-

Given A=0.5, the point at which the system should become unstable is B=1.25. We can confirm this in the bifurcation diagram.

-

We can also investigate the behavior when we vary both parameters of the system:

- - -
-bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0))
-plot(bif)
-plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"])
-
- - - - -
-

Getting Help

-

Have a question related to DiffEqBiological or this tutorial? Feel free to ask in the DifferentialEquations.jl Gitter. If you think you've found a bug in DiffEqBiological, or would like to request/discuss new functionality, feel free to open an issue on Github (but please check there is no related issue already open). If you've found a bug in this tutorial, or have a suggestion, feel free to open an issue on the DiffEqTutorials Github site. Or, submit a pull request to DiffEqTutorials updating the tutorial!

-
- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","04b-diffeqbio_III_steadystates.jmd")
-
-

Computer Information:

-
-
Julia Version 1.2.0
-Commit c6da87ff4b (2019-08-20 00:03 UTC)
-Platform Info:
-  OS: macOS (x86_64-apple-darwin18.6.0)
-  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.2/Project.toml`
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.3
-[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.19.4
-[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 6.3.4
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 4.0.1
-[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.2.2
-[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.5.1
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.8.0
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.20.0
-[42fd0dbc-a981-5370-80f2-aaf504508153] IterativeSolvers 0.8.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.11.0
-[54ca160b-1b9f-5127-a996-1867f4bc2a2c] ODEInterface 0.4.6
-[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.3.0
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.17.2
-[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.13.0
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.27.0
-[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.91.2
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.2
-[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.5
-[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 2.2.0
-[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.11.2
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.7.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[d6f4376e-aef5-505a-96c1-9c027394607a] Markdown
-
- - - -
- - - -
-
-
- - diff --git a/html/models/05-kepler_problem.html b/html/models/05-kepler_problem.html deleted file mode 100644 index 5f41b55a..00000000 --- a/html/models/05-kepler_problem.html +++ /dev/null @@ -1,959 +0,0 @@ - - - - - - Kepler Problem - - - - - - - - - - - - - - - - - -
-
-
- -
-

Kepler Problem

-
Yingbo Ma, Chris Rackauckas
- -
- -

The Hamiltonian $\mathcal {H}$ and the angular momentum $L$ for the Kepler problem are

-

\[ -\mathcal {H} = \frac{1}{2}(\dot{q}^2_1+\dot{q}^2_2)-\frac{1}{\sqrt{q^2_1+q^2_2}},\quad -L = q_1\dot{q_2} - \dot{q_1}q_2 -\]

-

Also, we know that

-

\[ -{\displaystyle {\frac {\mathrm {d} {\boldsymbol {p}}}{\mathrm {d} t}}=-{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {q}}}}\quad ,\quad {\frac {\mathrm {d} {\boldsymbol {q}}}{\mathrm {d} t}}=+{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {p}}}}} -\]

- - -
-using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr()
-H(q,p) = norm(p)^2/2 - inv(norm(q))
-L(q,p) = q[1]*p[2] - p[1]*q[2]
-
-pdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q)
-qdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p)
-
-initial_position = [.4, 0]
-initial_velocity = [0., 2.]
-initial_cond = (initial_position, initial_velocity)
-initial_first_integrals = (H(initial_cond...), L(initial_cond...))
-tspan = (0,20.)
-prob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan)
-sol = solve(prob, KahanLi6(), dt=1//10);
-
- - - -

Let's plot the orbit and check the energy and angular momentum variation. We know that energy and angular momentum should be constant, and they are also called first integrals.

- - -
-plot_orbit(sol) = plot(sol,vars=(3,4), lab="Orbit", title="Kepler Problem Solution")
-
-function plot_first_integrals(sol, H, L)
-    plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab="Energy variation", title="First Integrals")
-    plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab="Angular momentum variation")
-end
-analysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L))
-
- - -
-analysis_plot (generic function with 1 method)
-
- - - -
-analysis_plot(sol, H, L)
-
- - - - -

Let's try to use a Runge-Kutta-Nyström solver to solve this problem and check the first integrals' variation.

- - -
-sol2 = solve(prob, DPRKN6())  # dt is not necessary, because unlike symplectic
-                              # integrators DPRKN6 is adaptive
-@show sol2.u |> length
-
- - -
-sol2.u |> length = 79
-
- - - -
-analysis_plot(sol2, H, L)
-
- - - - -

Let's then try to solve the same problem by the ERKN4 solver, which is specialized for sinusoid-like periodic function

- - -
-sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic
-                            # integrators ERKN4 is adaptive
-@show sol3.u |> length
-
- - -
-sol3.u |> length = 52
-
- - - -
-analysis_plot(sol3, H, L)
-
- - - - -

We can see that ERKN4 does a bad job for this problem, because this problem is not sinusoid-like.

-

One advantage of using DynamicalODEProblem is that it can implicitly convert the second order ODE problem to a normal system of first order ODEs, which is solvable for other ODE solvers. Let's use the Tsit5 solver for the next example.

- - -
-sol4 = solve(prob, Tsit5())
-@show sol4.u |> length
-
- - -
-sol4.u |> length = 54
-
- - - -
-analysis_plot(sol4, H, L)
-
- - - - -

Note

-

There is drifting for all the solutions, and high order methods are drifting less because they are more accurate.

-

Conclusion

-
-

Symplectic integrator does not conserve the energy completely at all time, but the energy can come back. In order to make sure that the energy fluctuation comes back eventually, symplectic integrator has to have a fixed time step. Despite the energy variation, symplectic integrator conserves the angular momentum perfectly.

-

Both Runge-Kutta-Nyström and Runge-Kutta integrator do not conserve energy nor the angular momentum, and the first integrals do not tend to come back. An advantage Runge-Kutta-Nyström integrator over symplectic integrator is that RKN integrator can have adaptivity. An advantage Runge-Kutta-Nyström integrator over Runge-Kutta integrator is that RKN integrator has less function evaluation per step. The ERKN4 solver works best for sinusoid-like solutions.

-

Manifold Projection

-

In this example, we know that energy and angular momentum should be conserved. We can achieve this through mainfold projection. As the name implies, it is a procedure to project the ODE solution to a manifold. Let's start with a base case, where mainfold projection isn't being used.

- - -
-using DiffEqCallbacks
-
-plot_orbit2(sol) = plot(sol,vars=(1,2), lab="Orbit", title="Kepler Problem Solution")
-
-function plot_first_integrals2(sol, H, L)
-    plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab="Energy variation", title="First Integrals")
-    plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab="Angular momentum variation")
-end
-
-analysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L))
-
-function hamiltonian(du,u,params,t)
-    q, p = u[1:2], u[3:4]
-    qdot(@view(du[1:2]), p, q, params, t)
-    pdot(@view(du[3:4]), p, q, params, t)
-end
-
-prob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan)
-sol_ = solve(prob2, RK4(), dt=1//5, adaptive=false)
-analysis_plot2(sol_, H, L)
-
- - - - -

There is a significant fluctuation in the first integrals, when there is no mainfold projection.

- - -
-function first_integrals_manifold(residual,u)
-    residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])
-    residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4])
-end
-
-cb = ManifoldProjection(first_integrals_manifold)
-sol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb)
-analysis_plot2(sol5, H, L)
-
- - - - -

We can see that thanks to the manifold projection, the first integrals' variation is very small, although we are using RK4 which is not symplectic. But wait, what if we only project to the energy conservation manifold?

- - -
-function energy_manifold(residual,u)
-    residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])
-    residual[3:4] .= 0
-end
-energy_cb = ManifoldProjection(energy_manifold)
-sol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb)
-analysis_plot2(sol6, H, L)
-
- - - - -

There is almost no energy variation but angular momentum varies quite bit. How about only project to the angular momentum conservation manifold?

- - -
-function angular_manifold(residual,u)
-    residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4])
-    residual[3:4] .= 0
-end
-angular_cb = ManifoldProjection(angular_manifold)
-sol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb)
-analysis_plot2(sol7, H, L)
-
- - - - -

Again, we see what we expect.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","05-kepler_problem.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/models/06-pendulum_bayesian_inference.html b/html/models/06-pendulum_bayesian_inference.html deleted file mode 100644 index 08b65c17..00000000 --- a/html/models/06-pendulum_bayesian_inference.html +++ /dev/null @@ -1,847 +0,0 @@ - - - - - - Bayesian Inference on a Pendulum using Turing.jl - - - - - - - - - - - - - - - - - -
-
-
- -
-

Bayesian Inference on a Pendulum using Turing.jl

-
Vaibhav Dixit
- -
- -

Set up simple pendulum problem

- - -
-using DiffEqBayes, OrdinaryDiffEq, RecursiveArrayTools, Distributions, Plots, StatsPlots
-
- - - -

Let's define our simple pendulum problem. Here our pendulum has a drag term ω and a length L.

-

pendulum

-

We get first order equations by defining the first term as the velocity and the second term as the position, getting:

- - -
-function pendulum(du,u,p,t)
-    ω,L = p
-    x,y = u
-    du[1] = y
-    du[2] = - ω*y -(9.8/L)*sin(x)
-end
-
-u0 = [1.0,0.1]
-tspan = (0.0,10.0)
-prob1 = ODEProblem(pendulum,u0,tspan,[1.0,2.5])
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [1.0, 0.1]
-
- - -

Solve the model and plot

-

To understand the model and generate data, let's solve and visualize the solution with the known parameters:

- - -
-sol = solve(prob1,Tsit5())
-plot(sol)
-
- - - - -

It's the pendulum, so you know what it looks like. It's periodic, but since we have not made a small angle assumption it's not exactly sin or cos. Because the true dampening parameter ω is 1, the solution does not decay over time, nor does it increase. The length L determines the period.

-

Create some dummy data to use for estimation

-

We now generate some dummy data to use for estimation

- - -
-t = collect(range(1,stop=10,length=10))
-randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])
-data = convert(Array,randomized)
-
- - -
-2×10 Array{Float64,2}:
-  0.0615493  -0.368933  0.128309   0.0971307  …  -0.0205744  -0.00052941
- -1.21598     0.365816  0.31529   -0.252625       0.0151109  -0.00473605
-
- - -

Let's see what our data looks like on top of the real solution

- - -
-scatter!(data')
-
- - - - -

This data captures the non-dampening effect and the true period, making it perfect to attempting a Bayesian inference.

-

Perform Bayesian Estimation

-

Now let's fit the pendulum to the data. Since we know our model is correct, this should give us back the parameters that we used to generate the data! Define priors on our parameters. In this case, let's assume we don't have much information, but have a prior belief that ω is between 0.1 and 3.0, while the length of the pendulum L is probably around 3.0:

- - -
-priors = [Uniform(0.1,3.0), Normal(3.0,1.0)]
-
- - -
-2-element Array{Distributions.Distribution{Distributions.Univariate,Distrib
-utions.Continuous},1}:
- Distributions.Uniform{Float64}(a=0.1, b=3.0)
- Distributions.Normal{Float64}(μ=3.0, σ=1.0)
-
- - -

Finally let's run the estimation routine from DiffEqBayes.jl using the Turing.jl backend

- - -
-bayesian_result = turing_inference(prob1,Tsit5(),t,data,priors;num_samples=10_000,
-                                   syms = [:omega,:L])
-
- - -
-Object of type Chains, with data of type 10000×4×1 Array{Union{Missing, Flo
-at64},3}
-
-Log evidence      = -3.4208122518018893
-Iterations        = 1:10000
-Thinning interval = 1
-Chains            = 1
-Samples per chain = 10000
-internals         = lp
-parameters        = _theta[2], σ, _theta[1]
-
-2-element Array{MCMCChains.ChainDataFrame,1}
-
-Summary Statistics
-. Omitted printing of 1 columns
-│ Row │ parameters │ mean    │ std      │ naive_se   │ mcse       │ ess    
- │
-│     │ Symbol     │ Float64 │ Float64  │ Float64    │ Float64    │ Any    
- │
-├─────┼────────────┼─────────┼──────────┼────────────┼────────────┼────────
-─┤
-│ 1   │ _theta[1]  │ 1.54937 │ 0.837743 │ 0.00837743 │ 0.00791079 │ 10000.0
- │
-│ 2   │ _theta[2]  │ 3.02832 │ 1.00472  │ 0.0100472  │ 0.00923376 │ 10000.0
- │
-│ 3   │ σ          │ 2.89973 │ 5.31736  │ 0.0531736  │ 0.053681   │ 10000.0
- │
-
-Quantiles
-
-│ Row │ parameters │ 2.5%     │ 25.0%    │ 50.0%   │ 75.0%   │ 97.5%   │
-│     │ Symbol     │ Float64  │ Float64  │ Float64 │ Float64 │ Float64 │
-├─────┼────────────┼──────────┼──────────┼─────────┼─────────┼─────────┤
-│ 1   │ _theta[1]  │ 0.171025 │ 0.826122 │ 1.551   │ 2.26978 │ 2.92857 │
-│ 2   │ _theta[2]  │ 1.07599  │ 2.35301  │ 3.00239 │ 3.7014  │ 5.0227  │
-│ 3   │ σ          │ 0.538211 │ 1.10267  │ 1.75954 │ 3.06687 │ 11.9888 │
-
- - -

Notice that while our guesses had the wrong means, the learned parameters converged to the correct means, meaning that it learned good posterior distributions for the parameters. To look at these posterior distributions on the parameters, we can examine the chains:

- - -
-plot(bayesian_result)
-
- - - - -

As a diagnostic, we will also check the parameter chains. The chain is the MCMC sampling process. The chain should explore parameter space and converge reasonably well, and we should be taking a lot of samples after it converges (it is these samples that form the posterior distribution!)

- - -
-plot(bayesian_result, colordim = :parameter)
-
- - - - -

Notice that after awhile these chains converge to a "fuzzy line", meaning it found the area with the most likelihood and then starts to sample around there, which builds a posterior distribution around the true mean.

- - - -
- - - -
-
-
- - diff --git a/html/models/07-outer_solar_system.html b/html/models/07-outer_solar_system.html deleted file mode 100644 index 12d7bb57..00000000 --- a/html/models/07-outer_solar_system.html +++ /dev/null @@ -1,826 +0,0 @@ - - - - - - The Outer Solar System - - - - - - - - - - - - - - - - - -
-
-
- -
-

The Outer Solar System

-
Yingbo Ma, Chris Rackauckas
- -
- -

Data

-

The chosen units are: masses relative to the sun, so that the sun has mass $1$. We have taken $m_0 = 1.00000597682$ to take account of the inner planets. Distances are in astronomical units , times in earth days, and the gravitational constant is thus $G = 2.95912208286 \cdot 10^{-4}$.

-Markdown.Table(Array{Any,1}[[Any["planet"], Any["mass"], Any["initial position"], Any["initial velocity"]], [Any["Jupiter"], Any[$m_1 = 0.000954786104043$], Any["<ul><li>-3.5023653</li><li>-3.8169847</li><li>-1.5507963</li></ul>"], Any["<ul><li>0.00565429</li><li>-0.00412490</li><li>-0.00190589</li></ul>"]], [Any["Saturn"], Any[$m_2 = 0.000285583733151$], Any["<ul><li>9.0755314</li><li>-3.0458353</li><li>-1.6483708</li></ul>"], Any["<ul><li>0.00168318</li><li>0.00483525</li><li>0.00192462</li></ul>"]], [Any["Uranus"], Any[$m_3 = 0.0000437273164546$], Any["<ul><li>8.3101420</li><li>-16.2901086</li><li>-7.2521278</li></ul>"], Any["<ul><li>0.00354178</li><li>0.00137102</li><li>0.00055029</li></ul>"]], [Any["Neptune"], Any[$m_4 = 0.0000517759138449$], Any["<ul><li>11.4707666</li><li>-25.7294829</li><li>-10.8169456</li></ul>"], Any["<ul><li>0.00288930</li><li>0.00114527</li><li>0.00039677</li></ul>"]], [Any["Pluto"], Any["\$ m_5 = 1/(1.3 \\cdot 10^8 )\$"], Any["<ul><li>-15.5387357</li><li>-25.2225594</li><li>-3.1902382</li></ul>"], Any["<ul><li>0.00276725</li><li>-0.00170702</li><li>-0.00136504</li></ul>"]]], Symbol[:r, :r, :r, :r]) -

The data is taken from the book "Geometric Numerical Integration" by E. Hairer, C. Lubich and G. Wanner.

- - -
-using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools
-gr()
-
-G = 2.95912208286e-4
-M = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8]
-planets = ["Sun", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"]
-
-pos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357]
-pos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594]
-pos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382]
-pos = ArrayPartition(pos_x,pos_y,pos_z)
-
-vel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725]
-vel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702]
-vel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504]
-vel = ArrayPartition(vel_x,vel_y,vel_z)
-
-tspan = (0.,200_000)
-
- - -
-(0.0, 200000)
-
- - -

The N-body problem's Hamiltonian is

-

\[ -H(p,q) = \frac{1}{2}\sum_{i=0}^{N}\frac{p_{i}^{T}p_{i}}{m_{i}} - G\sum_{i=1}^{N}\sum_{j=0}^{i-1}\frac{m_{i}m_{j}}{\left\lVert q_{i}-q_{j} \right\rVert} -\]

-

Here, we want to solve for the motion of the five outer planets relative to the sun, namely, Jupiter, Saturn, Uranus, Neptune and Pluto.

- - -
-const  = sum
-const N = 6
-potential(p, t, x, y, z, M) = -G*(i->(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N)
-
- - -
-potential (generic function with 1 method)
-
- - -

Hamiltonian System

-

NBodyProblem constructs a second order ODE problem under the hood. We know that a Hamiltonian system has the form of

-

\[ -\dot{p} = -H_{q}(p,q)\quad \dot{q}=H_{p}(p,q) -\]

-

For an N-body system, we can symplify this as:

-

\[ -\dot{p} = -\nabla{V}(q)\quad \dot{q}=M^{-1}p. -\]

-

Thus $\dot{q}$ is defined by the masses. We only need to define $\dot{p}$, and this is done internally by taking the gradient of $V$. Therefore, we only need to pass the potential function and the rest is taken care of.

- - -
-nprob = NBodyProblem(potential, M, pos, vel, tspan)
-sol = solve(nprob,Yoshida6(), dt=100);
-
- - - - -
-orbitplot(sol,body_names=planets)
-
- - - - - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("models","07-outer_solar_system.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[159f3aea-2a34-519c-b102-8c37f9878175] Cairo 0.5.6
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/ode_extras/01-ModelingToolkit.html b/html/ode_extras/01-ModelingToolkit.html deleted file mode 100644 index 04f2fbbb..00000000 --- a/html/ode_extras/01-ModelingToolkit.html +++ /dev/null @@ -1,1331 +0,0 @@ - - - - - - ModelingToolkit.jl, An IR and Compiler for Scientific Models - - - - - - - - - - - - - - - - - -
-
-
- -
-

ModelingToolkit.jl, An IR and Compiler for Scientific Models

-
Chris Rackauckas
- -
- -

A lot of people are building modeling languages for their specific domains. However, while the syntax my vary greatly between these domain-specific languages (DSLs), the internals of modeling frameworks are surprisingly similar: building differential equations, calculating Jacobians, etc.

-

ModelingToolkit.jl is metamodeling systemitized

-

After building our third modeling interface, we realized that this problem can be better approached by having a reusable internal structure which DSLs can target. This internal is ModelingToolkit.jl: an Intermediate Representation (IR) with a well-defined interface for defining system transformations and compiling to Julia functions for use in numerical libraries. Now a DSL can easily be written by simply defining the translation to ModelingToolkit.jl's primatives and querying for the mathematical quantities one needs.

-

Basic usage: defining differential equation systems, with performance!

-

Let's explore the IR itself. ModelingToolkit.jl is friendly to use, and can used as a symbolic DSL in its own right. Let's define and solve the Lorenz differential equation system using ModelingToolkit to generate the functions:

- - -
-using ModelingToolkit
-
-### Define a differential equation system
-
-@parameters t σ ρ β
-@variables x(t) y(t) z(t)
-@derivatives D'~t
-
-eqs = [D(x) ~ σ*(y-x),
-       D(y) ~ x*(ρ-z)-y,
-       D(z) ~ x*y - β*z]
-de = ODESystem(eqs)
-ode_f = ODEFunction(de, [x,y,z], [σ,ρ,β])
-
-### Use in DifferentialEquations.jl
-
-using OrdinaryDiffEq
-u₀ = ones(3)
-tspan = (0.0,100.0)
-p = [10.0,28.0,10/3]
-prob = ODEProblem(ode_f,u₀,tspan,p)
-sol = solve(prob,Tsit5())
-
-using Plots
-plot(sol,vars=(1,2,3))
-
- - - - -

ModelingToolkit is a compiler for mathematical systems

-

At its core, ModelingToolkit is a compiler. It's IR is its type system, and its output are Julia functions (it's a compiler for Julia code to Julia code, written in Julia).

-

DifferentialEquations.jl wants a function f(du,u,p,t) for defining an ODE system, which is what ModelingToolkit.jl is building.

- - -
-generate_function(de, [x,y,z], [σ,ρ,β])
-
- - -
-:((##383, u, p, t)->begin
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:44 =#
-          let (x, y, z, σ, ρ, β) = (u[1], u[2], u[3], p[1], p[2], p[3])
-              ##383[1] = (*)(σ, (-)(y, x))
-              ##383[2] = (-)((*)(x, (-)(ρ, z)), y)
-              ##383[3] = (-)((*)(x, y), (*)(β, z))
-          end
-      end)
-
- - -

A special syntax in DifferentialEquations.jl for small static ODE systems uses f(u,p,t), which can be generated as well:

- - -
-generate_function(de, [x,y,z], [σ,ρ,β]; version=ModelingToolkit.SArrayFunction)
-
- - -
-:((u, p, t)->begin
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:48 =#
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:49 =#
-          X = let (x, y, z, σ, ρ, β) = (u[1], u[2], u[3], p[1], p[2], p[3])
-                  ((*)(σ, (-)(y, x)), (-)((*)(x, (-)(ρ, z)), y), (-)((*)(x, y), (*)(β, z)))
-              end
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:50 =#
-          T = StaticArrays.similar_type(typeof(u), eltype(X))
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:51 =#
-          T(X)
-      end)
-
- - -

ModelingToolkit.jl can be used to calculate the Jacobian of the differential equation system:

- - -
-jac = calculate_jacobian(de)
-
- - -
-3×3 Array{ModelingToolkit.Expression,2}:
-     σ() * -1           σ()  Constant(0)
- ρ() - z(t())  Constant(-1)  x(t()) * -1
-       y(t())        x(t())     -1 * β()
-
- - -

It will automatically generate functions for using this Jacobian within the stiff ODE solvers for faster solving:

- - -
-jac_expr = generate_jacobian(de)
-
- - -
-:((##384, u, p, t)->begin
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:44 =#
-          let (x, y, z, ρ, σ, β) = (u[1], u[2], u[3], p[1], p[2], p[3])
-              ##384[1] = (*)(σ, -1)
-              ##384[2] = (-)(ρ, z)
-              ##384[3] = y
-              ##384[4] = σ
-              ##384[5] = -1
-              ##384[6] = x
-              ##384[7] = 0
-              ##384[8] = (*)(x, -1)
-              ##384[9] = (*)(-1, β)
-          end
-      end)
-
- - -

It can even do fancy linear algebra. Stiff ODE solvers need to perform an LU-factorization which is their most expensive part. But ModelingToolkit.jl can skip this operation and instead generate the analytical solution to a matrix factorization, and build a Julia function for directly computing the factorization, which is then optimized in LLVM compiler passes.

- - -
-ModelingToolkit.generate_factorized_W(de)[1]
-
- - -
-:((##385, u, p, gam, t)->begin
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:44 =#
-          let (x, y, z, ρ, σ, β) = (u[1], u[2], u[3], p[1], p[2], p[3])
-              ##385[1] = (+)((*)(σ, gam), true)
-              ##385[2] = (*)(gam, (-)(ρ, z), -1, (inv)((+)((*)(σ, gam), true)))
-              ##385[3] = (*)(gam, y, -1, (inv)((+)((*)(σ, gam), true)))
-              ##385[4] = (*)(gam, σ, -1)
-              ##385[5] = (-)((+)(gam, true), (*)(gam, (-)(ρ, z), gam, σ, (inv)((+)((*)(σ, gam), true))))
-              ##385[6] = (*)((-)((*)(gam, x, -1), (*)(gam, y, gam, σ, (inv)((+)((*)(σ, gam), true)))), (inv)((-)((+)(gam, true), (
-*)(gam, (-)(ρ, z), gam, σ, (inv)((+)((*)(σ, gam), true))))))
-              ##385[7] = 0
-              ##385[8] = (-)((*)(x, gam), 0)
-              ##385[9] = (-)((-)((+)((*)(β, gam), true), 0), (*)((-)((*)(gam, x, -1), (*)(gam, y, gam, σ, (inv)((+)((*)(σ, gam), t
-rue)))), (inv)((-)((+)(gam, true), (*)(gam, (-)(ρ, z), gam, σ, (inv)((+)((*)(σ, gam), true))))), (-)((*)(x, gam), 0)))
-          end
-      end)
-
- - -

Solving Nonlinear systems

-

ModelingToolkit.jl is not just for differential equations. It can be used for any mathematical target that is representable by its IR. For example, let's solve a rootfinding problem F(x)=0. What we do is define a nonlinear system and generate a function for use in NLsolve.jl

- - -
-@variables x y z
-@parameters σ ρ β
-
-# Define a nonlinear system
-eqs = [0 ~ σ*(y-x),
-       0 ~ x*(ρ-z)-y,
-       0 ~ x*y - β*z]
-ns = NonlinearSystem(eqs, [x,y,z])
-nlsys_func = generate_function(ns, [x,y,z], [σ,ρ,β])
-
- - -
-:((##387, u, p)->begin
-          #= /home/alex/.julia/packages/ModelingToolkit/S0mks/src/utils.jl:44 =#
-          let (x, y, z, σ, ρ, β) = (u[1], u[2], u[3], p[1], p[2], p[3])
-              ##387[1] = (*)(σ, (-)(y, x))
-              ##387[2] = (-)((*)(x, (-)(ρ, z)), y)
-              ##387[3] = (-)((*)(x, y), (*)(β, z))
-          end
-      end)
-
- - -

We can then tell ModelingToolkit.jl to compile this function for use in NLsolve.jl, and then numerically solve the rootfinding problem:

- - -
-nl_f = @eval eval(nlsys_func)
-# Make a closure over the parameters for for NLsolve.jl
-f2 = (du,u) -> nl_f(du,u,(10.0,26.0,2.33))
-
-using NLsolve
-nlsolve(f2,ones(3))
-
- - -
-Results of Nonlinear Solver Algorithm
- * Algorithm: Trust-region with dogleg and autoscaling
- * Starting Point: [1.0, 1.0, 1.0]
- * Zero: [2.2228e-10, 2.2228e-10, -9.99034e-11]
- * Inf-norm of residuals: 0.000000
- * Iterations: 3
- * Convergence: true
-   * |x - x'| < 0.0e+00: false
-   * |f(x)| < 1.0e-08: true
- * Function Calls (f): 4
- * Jacobian Calls (df/dx): 4
-
- - -

Library of transformations on mathematical systems

-

The reason for using ModelingToolkit is not just for defining performant Julia functions for solving systems, but also for performing mathematical transformations which may be required in order to numerically solve the system. For example, let's solve a third order ODE. The way this is done is by transforming the third order ODE into a first order ODE, and then solving the resulting ODE. This transformation is given by the ode_order_lowering function.

- - -
-@derivatives D3'''~t
-@derivatives D2''~t
-@variables u(t), x(t)
-eqs = [D3(u) ~ 2(D2(u)) + D(u) + D(x) + 1
-       D2(x) ~ D(x) + 2]
-de = ODESystem(eqs)
-de1 = ode_order_lowering(de)
-
- - -
-ModelingToolkit.ODESystem(ModelingToolkit.DiffEq[DiffEq(u_tt, 1, ((2 * u_tt(t()) + u_t(t())) + x_t(t())) + 1), DiffEq(x_t, 1, x_t(
-t()) + 2), DiffEq(u_t, 1, u_tt(t())), DiffEq(u, 1, u_t(t())), DiffEq(x, 1, x_t(t()))], t, ModelingToolkit.Variable[u, x, u_tt, u_t
-, x_t], ModelingToolkit.Variable[], Base.RefValue{Array{ModelingToolkit.Expression,2}}(Array{Expression}(0,0)))
-
- - - -
-de1.eqs
-
- - -
-5-element Array{ModelingToolkit.DiffEq,1}:
- ModelingToolkit.DiffEq(u_tt, 1, ((2 * u_tt(t()) + u_t(t())) + x_t(t())) + 1)
- ModelingToolkit.DiffEq(x_t, 1, x_t(t()) + 2)                                
- ModelingToolkit.DiffEq(u_t, 1, u_tt(t()))                                   
- ModelingToolkit.DiffEq(u, 1, u_t(t()))                                      
- ModelingToolkit.DiffEq(x, 1, x_t(t()))
-
- - -

This has generated a system of 5 first order ODE systems which can now be used in the ODE solvers.

-

Linear Algebra... for free?

-

Let's take a look at how to extend ModelingToolkit.jl in new directions. Let's define a Jacobian just by using the derivative primatives by hand:

- - -
-@parameters t σ ρ β
-@variables x(t) y(t) z(t)
-@derivatives D'~t Dx'~x Dy'~y Dz'~z
-eqs = [D(x) ~ σ*(y-x),
-       D(y) ~ x*(ρ-z)-y,
-       D(z) ~ x*y - β*z]
-J = [Dx(eqs[1].rhs) Dy(eqs[1].rhs) Dz(eqs[1].rhs)
- Dx(eqs[2].rhs) Dy(eqs[2].rhs) Dz(eqs[2].rhs)
- Dx(eqs[3].rhs) Dy(eqs[3].rhs) Dz(eqs[3].rhs)]
-
- - -
-3×3 Array{ModelingToolkit.Operation,2}:
-          (D'~x(t()))(σ() * (y(t()) - x(t())))  …           (D'~z(t()))(σ() * (y(t()) - x(t())))
- (D'~x(t()))(x(t()) * (ρ() - z(t())) - y(t()))     (D'~z(t()))(x(t()) * (ρ() - z(t())) - y(t()))
-   (D'~x(t()))(x(t()) * y(t()) - β() * z(t()))       (D'~z(t()))(x(t()) * y(t()) - β() * z(t()))
-
- - -

Notice that this writes the derivatives in a "lazy" manner. If we want to actually compute the derivatives, we can expand out those expressions:

- - -
-J = expand_derivatives.(J)
-
- - -
-3×3 Array{ModelingToolkit.Expression,2}:
-     σ() * -1           σ()  Constant(0)
- ρ() - z(t())  Constant(-1)  x(t()) * -1
-       y(t())        x(t())     -1 * β()
-
- - -

Here's the magic of ModelingToolkit.jl: Julia treats ModelingToolkit expressions like a Number, and so generic numerical functions are directly usable on ModelingToolkit expressions! Let's compute the LU-factorization of this Jacobian we defined using Julia's Base linear algebra library.

- - -
-using LinearAlgebra
-luJ = lu(J)
-
- - -
-LinearAlgebra.LU{ModelingToolkit.Expression,Array{ModelingToolkit.Expression,2}}
-L factor:
-3×3 Array{ModelingToolkit.Expression,2}:
-                    Constant(1)  …  Constant(0)
- (ρ() - z(t())) * inv(σ() * -1)     identity(0)
-         y(t()) * inv(σ() * -1)     Constant(1)
-U factor:
-3×3 Array{ModelingToolkit.Expression,2}:
-    σ() * -1  …                                                                                                                   
-                                                                     Constant(0)
- identity(0)                                                                                                                      
-                              x(t()) * -1 - ((ρ() - z(t())) * inv(σ() * -1)) * 0
- identity(0)     (-1 * β() - (y(t()) * inv(σ() * -1)) * 0) - ((x(t()) - (y(t()) * inv(σ() * -1)) * σ()) * inv(-1 - ((ρ() - z(t()))
- * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(t())) * inv(σ() * -1)) * 0)
-
- - - -
-luJ.L
-
- - -
-3×3 Array{ModelingToolkit.Expression,2}:
-                    Constant(1)  …  Constant(0)
- (ρ() - z(t())) * inv(σ() * -1)     identity(0)
-         y(t()) * inv(σ() * -1)     Constant(1)
-
- - -

and the inverse?

- - -
-invJ = inv(J)
-
- - -
-3×3 Array{ModelingToolkit.Operation,2}:
- (σ() * -1) \ ((identity(true) - identity(0) * (((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t()) - (y(t()) * inv(σ(
-) * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(t())) * inv(σ() * -1)) * 
-identity(0))) \ ((identity(0) - (y(t()) * inv(σ() * -1)) * identity(true)) - ((x(t()) - (y(t()) * inv(σ() * -1)) * σ()) * inv(iden
-tity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identity(true))))) - σ() *
- ((identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ()) \ ((identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identity(true)) - (
-x(t()) * -1 - ((ρ() - z(t())) * inv(σ() * -1)) * identity(0)) * (((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t()) -
- (y(t()) * inv(σ() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(t())) * 
-inv(σ() * -1)) * identity(0))) \ ((identity(0) - (y(t()) * inv(σ() * -1)) * identity(true)) - ((x(t()) - (y(t()) * inv(σ() * -1)) 
-* σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identity(
-true)))))))  …  (σ() * -1) \ ((identity(0) - identity(0) * (((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t()) - (y(t
-()) * inv(σ() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(t())) * inv(σ
-() * -1)) * identity(0))) \ ((identity(true) - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t()) - (y(t()) * inv(σ() * -1)) * σ()
-) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identity(0))))
-) - σ() * ((identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ()) \ ((identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identity(0
-)) - (x(t()) * -1 - ((ρ() - z(t())) * inv(σ() * -1)) * identity(0)) * (((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(
-t()) - (y(t()) * inv(σ() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(t(
-))) * inv(σ() * -1)) * identity(0))) \ ((identity(true) - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t()) - (y(t()) * inv(σ() *
- -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * ide
-ntity(0)))))))
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-    (identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ()) \ ((identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identity(true)) -
- (x(t()) * -1 - ((ρ() - z(t())) * inv(σ() * -1)) * identity(0)) * (((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t())
- - (y(t()) * inv(σ() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(t())) 
-* inv(σ() * -1)) * identity(0))) \ ((identity(0) - (y(t()) * inv(σ() * -1)) * identity(true)) - ((x(t()) - (y(t()) * inv(σ() * -1)
-) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identit
-y(true)))))                                                                                                                       
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-             (identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ()) \ ((identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * identity
-(0)) - (x(t()) * -1 - ((ρ() - z(t())) * inv(σ() * -1)) * identity(0)) * (((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - ((
-x(t()) - (y(t()) * inv(σ() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(
-t())) * inv(σ() * -1)) * identity(0))) \ ((identity(true) - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t()) - (y(t()) * inv(σ()
- * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * i
-dentity(0)))))
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-                                                                     ((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t(
-)) - (y(t()) * inv(σ() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - z(t())
-) * inv(σ() * -1)) * identity(0))) \ ((identity(0) - (y(t()) * inv(σ() * -1)) * identity(true)) - ((x(t()) - (y(t()) * inv(σ() * -
-1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) * ident
-ity(true)))                                                                                                                       
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-                                                                                                                                  
-                                                                           ((-1 * β() - (y(t()) * inv(σ() * -1)) * identity(0)) - 
-((x(t()) - (y(t()) * inv(σ() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (x(t()) * -1 - ((ρ() - 
-z(t())) * inv(σ() * -1)) * identity(0))) \ ((identity(true) - (y(t()) * inv(σ() * -1)) * identity(0)) - ((x(t()) - (y(t()) * inv(σ
-() * -1)) * σ()) * inv(identity(-1) - ((ρ() - z(t())) * inv(σ() * -1)) * σ())) * (identity(0) - ((ρ() - z(t())) * inv(σ() * -1)) *
- identity(0)))
-
- - -

Thus ModelingToolkit.jl can utilize existing numerical code on symbolic codes

-

Let's follow this thread a little deeper.

-

Automatically convert numerical codes to symbolic

-

Let's take someone's code written to numerically solve the Lorenz equation:

- - -
-function lorenz(du,u,p,t)
- du[1] = p[1]*(u[2]-u[1])
- du[2] = u[1]*(p[2]-u[3]) - u[2]
- du[3] = u[1]*u[2] - p[3]*u[3]
-end
-
- - -
-lorenz (generic function with 1 method)
-
- - -

Since ModelingToolkit can trace generic numerical functions in Julia, let's trace it with Operations. When we do this, it'll spit out a symbolic representation of their numerical code:

- - -
-u = [x,y,z]
-du = similar(u)
-p = [σ,ρ,β]
-lorenz(du,u,p,t)
-du
-
- - -
-3-element Array{ModelingToolkit.Operation,1}:
-          σ() * (y(t()) - x(t()))
- x(t()) * (ρ() - z(t())) - y(t())
-   x(t()) * y(t()) - β() * z(t())
-
- - -

We can then perform symbolic manipulations on their numerical code, and build a new numerical code that optimizes/fixes their original function!

- - -
-J = [Dx(du[1]) Dy(du[1]) Dz(du[1])
-     Dx(du[2]) Dy(du[2]) Dz(du[2])
-     Dx(du[3]) Dy(du[3]) Dz(du[3])]
-J = expand_derivatives.(J)
-
- - -
-3×3 Array{ModelingToolkit.Expression,2}:
-     σ() * -1           σ()  Constant(0)
- ρ() - z(t())  Constant(-1)  x(t()) * -1
-       y(t())        x(t())     -1 * β()
-
- - -

Automated Sparsity Detection

-

In many cases one has to speed up large modeling frameworks by taking into account sparsity. While ModelingToolkit.jl can be used to compute Jacobians, we can write a standard Julia function in order to get a spase matrix of expressions which automatically detects and utilizes the sparsity of their function.

- - -
-using SparseArrays
-function SparseArrays.SparseMatrixCSC(M::Matrix{T}) where {T<:ModelingToolkit.Expression}
-    idxs = findall(!iszero, M)
-    I = [i[1] for i in idxs]
-    J = [i[2] for i in idxs]
-    V = [M[i] for i in idxs]
-    return SparseArrays.sparse_IJ_sorted!(I, J, V, size(M)...)
-end
-sJ = SparseMatrixCSC(J)
-
- - -
-3×3 SparseArrays.SparseMatrixCSC{ModelingToolkit.Expression,Int64} with 8 stored entries:
-  [1, 1]  =  σ() * -1
-  [2, 1]  =  ρ() - z(t())
-  [3, 1]  =  y(t())
-  [1, 2]  =  σ()
-  [2, 2]  =  Constant(-1)
-  [3, 2]  =  x(t())
-  [2, 3]  =  x(t()) * -1
-  [3, 3]  =  -1 * β()
-
- - -

Dependent Variables, Functions, Chain Rule

-

"Variables" are overloaded. When you are solving a differential equation, the variable u(t) is actually a function of time. In order to handle these kinds of variables in a mathematically correct and extensible manner, the ModelingToolkit IR actually treats variables as functions, and constant variables are simply 0-ary functions (t()).

-

We can utilize this idea to have parameters that are also functions. For example, we can have a parameter σ which acts as a function of 1 argument, and then utilize this function within our differential equations:

- - -
-@parameters σ(..)
-eqs = [D(x) ~ σ(t-1)*(y-x),
-       D(y) ~ x*(σ(t^2)-z)-y,
-       D(z) ~ x*y - β*z]
-
- - -
-3-element Array{ModelingToolkit.Equation,1}:
- ModelingToolkit.Equation((D'~t())(x(t())), σ(t() - 1) * (y(t()) - x(t())))         
- ModelingToolkit.Equation((D'~t())(y(t())), x(t()) * (σ(t() ^ 2) - z(t())) - y(t()))
- ModelingToolkit.Equation((D'~t())(z(t())), x(t()) * y(t()) - β() * z(t()))
-
- - -

Notice that when we calculate the derivative with respect to t, the chain rule is automatically handled:

- - -
-@derivatives Dₜ'~t
-Dₜ(x*(σ(t^2)-z)-y)
-expand_derivatives(Dₜ(x*(σ(t^2)-z)-y))
-
- - -
-(σ(t() ^ 2) - z(t())) * (D'~t())(x(t())) + x(t()) * ((D'~t())(σ(t() ^ 2)) + -1 * (D'~t())(z(t()))) + -1 * (D'~t())(y(t()))
-
- - -

Hackability: Extend directly from the language

-

ModelingToolkit.jl is written in Julia, and thus it can be directly extended from Julia itself. Let's define a normal Julia function and call it with a variable:

- - -
-_f(x) = 2x + x^2
-_f(x)
-
- - -
-2 * x(t()) + x(t()) ^ 2
-
- - -

Recall that when we do that, it will automatically trace this function and then build a symbolic expression. But what if we wanted our function to be a primative in the symbolic framework? This can be done by registering the function.

- - -
-f(x) = 2x + x^2
-@register f(x)
-
- - -
-f (generic function with 2 methods)
-
- - -

Now this function is a new primitive:

- - -
-f(x)
-
- - -
-Main.WeaveSandBox22.f(x(t()))
-
- - -

and we can now define derivatives of our function:

- - -
-function ModelingToolkit.derivative(::typeof(f), args::NTuple{1,Any}, ::Val{1})
-    2 + 2args[1]
-end
-expand_derivatives(Dx(f(x)))
-
- - -
-2 + 2 * x(t())
-
- - - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("ode_extras","01-ModelingToolkit.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[159f3aea-2a34-519c-b102-8c37f9878175] Cairo 0.6.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.2
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.16.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/ode_extras/02-feagin.html b/html/ode_extras/02-feagin.html deleted file mode 100644 index c3f24326..00000000 --- a/html/ode_extras/02-feagin.html +++ /dev/null @@ -1,912 +0,0 @@ - - - - - - Feagin's Order 10, 12, and 14 Methods - - - - - - - - - - - - - - - - - -
-
-
- -
-

Feagin's Order 10, 12, and 14 Methods

-
Chris Rackauckas
- -
- -

DifferentialEquations.jl includes Feagin's explicit Runge-Kutta methods of orders 10/8, 12/10, and 14/12. These methods have such high order that it's pretty much required that one uses numbers with more precision than Float64. As a prerequisite reference on how to use arbitrary number systems (including higher precision) in the numerical solvers, please see the Solving Equations in With Chosen Number Types notebook.

-

Investigation of the Method's Error

-

We can use Feagin's order 16 method as follows. Let's use a two-dimensional linear ODE. Like in the Solving Equations in With Chosen Number Types notebook, we change the initial condition to BigFloats to tell the solver to use BigFloat types.

- - -
-using DifferentialEquations
-const linear_bigα = big(1.01)
-f(u,p,t) = (linear_bigα*u)
-
-# Add analytical solution so that errors are checked
-f_analytic(u0,p,t) = u0*exp(linear_bigα*t)
-ff = ODEFunction(f,analytic=f_analytic)
-prob = ODEProblem(ff,big(0.5),(0.0,1.0))
-sol = solve(prob,Feagin14(),dt=1//16,adaptive=false);
-
- - - - -
-println(sol.errors)
-
- - -
-Dict(:l∞=>2.19751e-23,:final=>2.19751e-23,:l2=>1.0615e-23)
-
- - -

Compare that to machine $\epsilon$ for Float64:

- - -
-eps(Float64)
-
- - -
-2.220446049250313e-16
-
- - -

The error for Feagin's method when the stepsize is 1/16 is 8 orders of magnitude below machine $\epsilon$! However, that is dependent on the stepsize. If we instead use adaptive timestepping with the default tolerances, we get

- - -
-sol =solve(prob,Feagin14());
-println(sol.errors); print("The length was $(length(sol))")
-
- - -
-Dict(:l∞=>1.54574e-09,:final=>1.54574e-09,:l2=>8.92507e-10)
-The length was 3
-
- - -

Notice that when the stepsize is much higher, the error goes up quickly as well. These super high order methods are best when used to gain really accurate approximations (using still modest timesteps). Some examples of where such precision is necessary is astrodynamics where the many-body problem is highly chaotic and thus sensitive to small errors.

-

Convergence Test

-

The Order 14 method is awesome, but we need to make sure it's really that awesome. The following convergence test is used in the package tests in order to make sure the implementation is correct. Note that all methods have such tests in place.

- - -
-using DiffEqDevTools
-dts = 1.0 ./ 2.0 .^(10:-1:4)
-sim = test_convergence(dts,prob,Feagin14())
-
- - -
-DiffEqDevTools.ConvergenceSimulation{DiffEqBase.ODESolution{BigFloat,1,Arra
-y{BigFloat,1},Array{BigFloat,1},Dict{Symbol,BigFloat},Array{Float64,1},Arra
-y{Array{BigFloat,1},1},DiffEqBase.ODEProblem{BigFloat,Tuple{Float64,Float64
-},false,Nothing,DiffEqBase.ODEFunction{false,typeof(Main.WeaveSandBox22.f),
-LinearAlgebra.UniformScaling{Bool},typeof(Main.WeaveSandBox22.f_analytic),N
-othing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing},Nothing,Dif
-fEqBase.StandardODEProblem},OrdinaryDiffEq.Feagin14,OrdinaryDiffEq.Interpol
-ationData{DiffEqBase.ODEFunction{false,typeof(Main.WeaveSandBox22.f),Linear
-Algebra.UniformScaling{Bool},typeof(Main.WeaveSandBox22.f_analytic),Nothing
-,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing},Array{BigFloat,1}
-,Array{Float64,1},Array{Array{BigFloat,1},1},OrdinaryDiffEq.Feagin14Constan
-tCache{BigFloat,Float64}},DiffEqBase.DEStats}}(DiffEqBase.ODESolution{BigFl
-oat,1,Array{BigFloat,1},Array{BigFloat,1},Dict{Symbol,BigFloat},Array{Float
-64,1},Array{Array{BigFloat,1},1},DiffEqBase.ODEProblem{BigFloat,Tuple{Float
-64,Float64},false,Nothing,DiffEqBase.ODEFunction{false,typeof(Main.WeaveSan
-dBox22.f),LinearAlgebra.UniformScaling{Bool},typeof(Main.WeaveSandBox22.f_a
-nalytic),Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing},N
-othing,DiffEqBase.StandardODEProblem},OrdinaryDiffEq.Feagin14,OrdinaryDiffE
-q.InterpolationData{DiffEqBase.ODEFunction{false,typeof(Main.WeaveSandBox22
-.f),LinearAlgebra.UniformScaling{Bool},typeof(Main.WeaveSandBox22.f_analyti
-c),Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing},Array{B
-igFloat,1},Array{Float64,1},Array{Array{BigFloat,1},1},OrdinaryDiffEq.Feagi
-n14ConstantCache{BigFloat,Float64}},DiffEqBase.DEStats}[retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.000976563, 0.00195313, 0.00292969, 0.00390625, 0.00488281, 0.005
-85938, 0.00683594, 0.0078125, 0.00878906  …  0.991211, 0.992188, 0.993164, 
-0.994141, 0.995117, 0.996094, 0.99707, 0.998047, 0.999023, 1.0]
-u: BigFloat[0.50, 0.500493, 0.500987, 0.501482, 0.501977, 0.502472, 0.50296
-8, 0.503464, 0.503961, 0.504458  …  1.36067, 1.36201, 1.36335, 1.3647, 1.36
-605, 1.3674, 1.36874, 1.3701, 1.37145, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.00195313, 0.00390625, 0.00585938, 0.0078125, 0.00976563, 0.01171
-88, 0.0136719, 0.015625, 0.0175781  …  0.982422, 0.984375, 0.986328, 0.9882
-81, 0.990234, 0.992188, 0.994141, 0.996094, 0.998047, 1.0]
-u: BigFloat[0.50, 0.500987, 0.501977, 0.502968, 0.503961, 0.504956, 0.50595
-3, 0.506952, 0.507953, 0.508956  …  1.34864, 1.35131, 1.35397, 1.35665, 1.3
-5933, 1.36201, 1.3647, 1.3674, 1.3701, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.00390625, 0.0078125, 0.0117188, 0.015625, 0.0195313, 0.0234375, 
-0.0273438, 0.03125, 0.0351563  …  0.964844, 0.96875, 0.972656, 0.976563, 0.
-980469, 0.984375, 0.988281, 0.992188, 0.996094, 1.0]
-u: BigFloat[0.50, 0.501977, 0.503961, 0.505953, 0.507953, 0.509961, 0.51197
-7, 0.514001, 0.516033, 0.518073  …  1.32491, 1.33015, 1.33541, 1.34069, 1.3
-4599, 1.35131, 1.35665, 1.36201, 1.3674, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.0078125, 0.015625, 0.0234375, 0.03125, 0.0390625, 0.046875, 0.05
-46875, 0.0625, 0.0703125  …  0.929688, 0.9375, 0.945313, 0.953125, 0.960938
-, 0.96875, 0.976563, 0.984375, 0.992188, 1.0]
-u: BigFloat[0.50, 0.503961, 0.507953, 0.511977, 0.516033, 0.520121, 0.52424
-1, 0.528394, 0.53258, 0.536799  …  1.27869, 1.28882, 1.29903, 1.30932, 1.31
-969, 1.33015, 1.34069, 1.35131, 1.36201, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.015625, 0.03125, 0.046875, 0.0625, 0.078125, 0.09375, 0.109375, 
-0.125, 0.140625  …  0.859375, 0.875, 0.890625, 0.90625, 0.921875, 0.9375, 0
-.953125, 0.96875, 0.984375, 1.0]
-u: BigFloat[0.50, 0.507953, 0.516033, 0.524241, 0.53258, 0.541051, 0.549658
-, 0.558401, 0.567283, 0.576306  …  1.19103, 1.20998, 1.22923, 1.24878, 1.26
-864, 1.28882, 1.30932, 1.33015, 1.35131, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.03125, 0.0625, 0.09375, 0.125, 0.15625, 0.1875, 0.21875, 0.25, 0
-.28125  …  0.71875, 0.75, 0.78125, 0.8125, 0.84375, 0.875, 0.90625, 0.9375,
- 0.96875, 1.0]
-u: BigFloat[0.50, 0.516033, 0.53258, 0.549658, 0.567283, 0.585473, 0.604247
-, 0.623623, 0.64362, 0.664258  …  1.03333, 1.06647, 1.10067, 1.13596, 1.172
-39, 1.20998, 1.24878, 1.28882, 1.33015, 1.3728], retcode: Success
-Interpolation: 3rd order Hermite
-t: [0.0, 0.0625, 0.125, 0.1875, 0.25, 0.3125, 0.375, 0.4375, 0.5, 0.5625, 0
-.625, 0.6875, 0.75, 0.8125, 0.875, 0.9375, 1.0]
-u: BigFloat[0.50, 0.53258, 0.567283, 0.604247, 0.64362, 0.685558, 0.730229,
- 0.777811, 0.828493, 0.882477, 0.93998, 1.00123, 1.06647, 1.13596, 1.20998,
- 1.28882, 1.3728]], Dict{Any,Any}(:l∞=>BigFloat[3.35435e-49, 5.07978e-45, 6
-.96505e-41, 6.99856e-37, 2.7616e-33, 4.96506e-28, 2.19751e-23],:final=>BigF
-loat[3.35435e-49, 5.07978e-45, 6.96505e-41, 6.99856e-37, 2.7616e-33, 4.9650
-6e-28, 2.19751e-23],:l2=>BigFloat[1.55766e-49, 2.36041e-45, 3.24061e-41, 3.
-26457e-37, 1.29478e-33, 2.35149e-28, 1.0615e-23]), 7, Dict(:dts=>[0.0009765
-63, 0.00195313, 0.00390625, 0.0078125, 0.015625, 0.03125, 0.0625]), Dict{An
-y,Any}(:l∞=>14.2933,:final=>14.2933,:l2=>14.3028), [0.000976563, 0.00195313
-, 0.00390625, 0.0078125, 0.015625, 0.03125, 0.0625])
-
- - -

For a view of what's going on, let's plot the simulation results.

- - -
-using Plots
-gr()
-plot(sim)
-
- - - - -

This is a clear trend indicating that the convergence is truly Order 14, which is the estimated slope.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("ode_extras","02-feagin.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/ode_extras/03-ode_minmax.html b/html/ode_extras/03-ode_minmax.html deleted file mode 100644 index 41248685..00000000 --- a/html/ode_extras/03-ode_minmax.html +++ /dev/null @@ -1,1034 +0,0 @@ - - - - - - Finding Maxima and Minima of DiffEq Solutions - - - - - - - - - - - - - - - - - -
-
-
- -
-

Finding Maxima and Minima of DiffEq Solutions

-
Chris Rackauckas
- -
- -

Setup

-

In this tutorial we will show how to use Optim.jl to find the maxima and minima of solutions. Let's take a look at the double pendulum:

- - -
-#Constants and setup
-using OrdinaryDiffEq
-initial = [0.01, 0.01, 0.01, 0.01]
-tspan = (0.,100.)
-
-#Define the problem
-function double_pendulum_hamiltonian(udot,u,p,t)
-    α  = u[1]
-     = u[2]
-    β  = u[3]
-     = u[4]
-    udot .=
-    [2(-(1+cos(β)))/(3-cos(2β)),
-    -2sin(α) - sin(α+β),
-    2(-(1+cos(β)) + (3+2cos(β)))/(3-cos(2β)),
-    -sin(α+β) - 2sin(β)*(((-))/(3-cos(2β))) + 2sin(2β)*((^2 - 2(1+cos(β))* + (3+2cos(β))^2)/(3-cos(2β))^2)]
-end
-
-#Pass to solvers
-poincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 100.0)
-u0: [0.01, 0.01, 0.01, 0.01]
-
- - - -
-sol = solve(poincare, Tsit5())
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: 193-element Array{Float64,1}:
-   0.0                
-   0.08332584852065579
-   0.24175280271811872
-   0.4389536500504315 
-   0.6797322542488147 
-   0.964763376337819  
-   1.3179449556841032 
-   1.7031210236280163 
-   2.0678477932001846 
-   2.471782525434673  
-   ⋮                  
-  95.84571836675003   
-  96.35777612654726   
-  96.9291238553263    
-  97.4467872981331    
-  97.9624744296349    
-  98.51182496995675   
-  99.06081878698582   
-  99.58283477685029   
- 100.0                
-u: 193-element Array{Array{Float64,1},1}:
- [0.01, 0.01, 0.01, 0.01]                          
- [0.00917069, 0.006669, 0.0124205, 0.00826641]     
- [0.00767328, 0.000374625, 0.0164426, 0.00463683]  
- [0.00612597, -0.00730546, 0.0199674, -0.000336506]
- [0.0049661, -0.0163086, 0.0214407, -0.00670509]   
- [0.00479557, -0.0262381, 0.0188243, -0.0139134]   
- [0.00605469, -0.0371246, 0.0100556, -0.0210382]   
- [0.00790078, -0.046676, -0.00267353, -0.025183]   
- [0.00827652, -0.0527843, -0.0127315, -0.0252581]  
- [0.00552358, -0.0552525, -0.0168439, -0.021899]   
- ⋮                                                 
- [-0.0148868, 0.0423324, 0.0136282, 0.0180291]     
- [-0.00819054, 0.0544225, 0.00944831, 0.0177401]   
- [0.00412448, 0.0567489, -0.00515392, 0.017597]    
- [0.0130796, 0.0480772, -0.0137706, 0.0182866]     
- [0.0153161, 0.0316313, -0.00895722, 0.0171185]    
- [0.0111156, 0.00992938, 0.0072972, 0.0103535]     
- [0.00571392, -0.0117872, 0.020508, -0.00231029]   
- [0.00421143, -0.0299109, 0.0187506, -0.0156505]   
- [0.00574124, -0.0416539, 0.00741327, -0.023349]
-
- - -

In time, the solution looks like:

- - -
-using Plots; gr()
-plot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000)
-
- - - - -

while it has the well-known phase-space plot:

- - -
-plot(sol, vars=(3,4), leg=false)
-
- - - - -

Local Optimization

-

Let's fine out what some of the local maxima and minima are. Optim.jl can be used to minimize functions, and the solution type has a continuous interpolation which can be used. Let's look for the local optima for the 4th variable around t=20. Thus our optimization function is:

- - -
-f = (t) -> sol(t,idxs=4)
-
- - -
-#1 (generic function with 1 method)
-
- - -

first(t) is the same as t[1] which transforms the array of size 1 into a number. idxs=4 is the same as sol(first(t))[4] but does the calculation without a temporary array and thus is faster. To find a local minima, we can simply call Optim on this function. Let's find a local minimum:

- - -
-using Optim
-opt = optimize(f,18.0,22.0)
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Brent's Method
- * Search Interval: [18.000000, 22.000000]
- * Minimizer: 1.863213e+01
- * Minimum: -2.793164e-02
- * Iterations: 11
- * Convergence: max(|x - x_upper|, |x - x_lower|) <= 2*(1.5e-08*|x|+2.2e-16
-): true
- * Objective Function Calls: 12
-
- - -

From this printout we see that the minimum is at t=18.63 and the value is -2.79e-2. We can get these in code-form via:

- - -
-println(opt.minimizer)
-
- - -
-18.632126799595834
-
- - - -
-println(opt.minimum)
-
- - -
--0.027931635264246277
-
- - -

To get the maximum, we just minimize the negative of the function:

- - -
-f = (t) -> -sol(first(t),idxs=4)
-opt2 = optimize(f,0.0,22.0)
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Brent's Method
- * Search Interval: [0.000000, 22.000000]
- * Minimizer: 1.399975e+01
- * Minimum: -2.269411e-02
- * Iterations: 13
- * Convergence: max(|x - x_upper|, |x - x_lower|) <= 2*(1.5e-08*|x|+2.2e-16
-): true
- * Objective Function Calls: 14
-
- - -

Let's add the maxima and minima to the plots:

- - -
-plot(sol, vars=(0,4), plotdensity=10000)
-scatter!([opt.minimizer],[opt.minimum],label="Local Min")
-scatter!([opt2.minimizer],[-opt2.minimum],label="Local Max")
-
- - - - -

Brent's method will locally minimize over the full interval. If we instead want a local maxima nearest to a point, we can use BFGS(). In this case, we need to optimize a vector [t], and thus dereference it to a number using first(t).

- - -
-f = (t) -> -sol(first(t),idxs=4)
-opt = optimize(f,[20.0],BFGS())
-
- - -
-Results of Optimization Algorithm
- * Algorithm: BFGS
- * Starting Point: [20.0]
- * Minimizer: [23.297607288716723]
- * Minimum: -2.588588e-02
- * Iterations: 4
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: false 
-     |x - x'| = 1.11e-04 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: false
-     |f(x) - f(x')| = -6.49e-09 |f(x)|
-   * |g(x)| ≤ 1.0e-08: true 
-     |g(x)| = 8.41e-12 
-   * Stopped by an increasing objective: false
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 16
- * Gradient Calls: 16
-
- - -

Global Optimization

-

If we instead want to find global maxima and minima, we need to look somewhere else. For this there are many choices. A pure Julia option is BlackBoxOptim.jl, but I will use NLopt.jl. Following the NLopt.jl tutorial but replacing their function with out own:

- - -
-import NLopt, ForwardDiff
-
-count = 0 # keep track of # function evaluations
-
-function g(t::Vector, grad::Vector)
-  if length(grad) > 0
-    #use ForwardDiff for the gradients
-    grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t)
-  end
-  sol(first(t),idxs=4)
-end
-opt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1)
-NLopt.lower_bounds!(opt, [0.0])
-NLopt.upper_bounds!(opt, [40.0])
-NLopt.xtol_rel!(opt,1e-8)
-NLopt.min_objective!(opt, g)
-(minf,minx,ret) = NLopt.optimize(opt,[20.0])
-println(minf," ",minx," ",ret)
-
- - -
--0.027931635264246215 [18.6321] XTOL_REACHED
-
- - - -
-NLopt.max_objective!(opt, g)
-(maxf,maxx,ret) = NLopt.optimize(opt,[20.0])
-println(maxf," ",maxx," ",ret)
-
- - -
-0.027968571933041936 [6.5537] XTOL_REACHED
-
- - - -
-plot(sol, vars=(0,4), plotdensity=10000)
-scatter!([minx],[minf],label="Global Min")
-scatter!([maxx],[maxf],label="Global Max")
-
- - - - - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("ode_extras","03-ode_minmax.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/ode_extras/04-monte_carlo_parameter_estim.html b/html/ode_extras/04-monte_carlo_parameter_estim.html deleted file mode 100644 index d9bd1603..00000000 --- a/html/ode_extras/04-monte_carlo_parameter_estim.html +++ /dev/null @@ -1,1106 +0,0 @@ - - - - - - Monte Carlo Parameter Estimation From Data - - - - - - - - - - - - - - - - - -
-
-
- -
-

Monte Carlo Parameter Estimation From Data

-
Chris Rackauckas
- -
- -

First you want to create a problem which solves multiple problems at the same time. This is the Monte Carlo Problem. When the parameter estimation tools say it will take any DEProblem, it really means ANY DEProblem!

-

So, let's get a Monte Carlo problem setup that solves with 10 different initial conditions.

- - -
-using DifferentialEquations, DiffEqParamEstim, Plots, Optim
-
-# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions
-
-# Set up Lotka-Volterra system
-function pf_func(du,u,p,t)
-  du[1] = p[1] * u[1] - p[2] * u[1]*u[2]
-  du[2] = -3 * u[2] + u[1]*u[2]
-end
-p = [1.5,1.0]
-prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p)
-
- - -
-ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
-timespan: (0.0, 10.0)
-u0: [1.0, 1.0]
-
- - -

Now for a MonteCarloProblem we have to take this problem and tell it what to do N times via the prob_func. So let's generate N=10 different initial conditions, and tell it to run the same problem but with these 10 different initial conditions each time:

- - -
-# Setting up to solve the problem N times (for the N different initial conditions)
-N = 10;
-initial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]]
-function prob_func(prob,i,repeat)
-  ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)
-end
-monte_prob = MonteCarloProblem(prob,prob_func=prob_func)
-
- - -
-MonteCarloProblem with problem ODEProblem
-
- - -

We can check this does what we want by solving it:

- - -
-# Check above does what we want
-sim = solve(monte_prob,Tsit5(),num_monte=N)
-plot(sim)
-
- - - - -

nummonte=N means "run N times", and each time it runs the problem returned by the probfunc, which is always the same problem but with the ith initial condition.

-

Now let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array.

- - -
-# Generate a dataset from these runs
-data_times = 0.0:0.1:10.0
-sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)
-data = Array(sim)
-
- - -
-2×101×10 Array{Float64,3}:
-[:, :, 1] =
- 1.0  1.06108   1.14403   1.24917   1.37764   …  0.956979  0.983561  1.0337
-6 
- 1.0  0.821084  0.679053  0.566893  0.478813     1.35559   1.10629   0.9063
-71
-
-[:, :, 2] =
- 1.0  1.01413  1.05394  1.11711   …  1.05324  1.01309  1.00811  1.03162
- 1.5  1.22868  1.00919  0.833191     2.08023  1.70818  1.39973  1.14803
-
-[:, :, 3] =
- 1.5  1.58801   1.70188   1.84193   2.00901   …  2.0153    2.21084   2.4358
-9 
- 1.0  0.864317  0.754624  0.667265  0.599149     0.600942  0.549793  0.5136
-79
-
-[:, :, 4] =
- 1.5  1.51612  1.5621   1.63555   1.73531   …  1.83823   1.98545   2.15958 
- 1.5  1.29176  1.11592  0.969809  0.850159     0.771089  0.691421  0.630025
-
-[:, :, 5] =
- 0.5  0.531705  0.576474  0.634384  0.706139  …  9.05366   9.4006   8.83911
- 1.0  0.77995   0.610654  0.480565  0.380645     0.809382  1.51708  2.82619
-
-[:, :, 6] =
- 1.0  1.11027   1.24238   1.39866   1.58195   …  0.753108  0.748814  0.7682
-84
- 0.5  0.411557  0.342883  0.289812  0.249142     1.73879   1.38829   1.1093
-2 
-
-[:, :, 7] =
- 0.5  0.555757  0.623692  0.705084  0.80158   …  8.11216   9.10671   9.9217
- 
- 0.5  0.390449  0.30679   0.24286   0.193966     0.261298  0.455937  0.8788
-1
-
-[:, :, 8] =
- 2.0  2.11239   2.24921   2.41003   2.59433   …  3.223     3.47362   3.7301
-4 
- 1.0  0.909749  0.838025  0.783532  0.745339     0.739471  0.765597  0.8130
-86
-
-[:, :, 9] =
- 1.0  0.969326  0.971358  1.00017  …  1.25065  1.1012   1.01733  0.979306
- 2.0  1.63445   1.33389   1.09031     3.02671  2.52063  2.07502  1.69807 
-
-[:, :, 10] =
- 2.0  1.92148  1.88215  1.87711  1.90264  …  2.15079   2.27938   2.43105
- 2.0  1.80195  1.61405  1.4426   1.2907      0.957221  0.884827  0.82948
-
- - -

Here, data[i,j,k] is the same as sim[i,j,k] which is the same as sim[k]i,j. So data[i,j,k] is the jth timepoint of the ith variable in the kth trajectory.

-

Now let's build a loss function. A loss function is some loss(sol) that spits out a scalar for how far from optimal we are. In the documentation I show that we normally do loss = L2Loss(t,data), but we can bootstrap off of this. Instead lets build an array of N loss functions, each one with the correct piece of data.

- - -
-# Building a loss function
-losses = [L2Loss(data_times,data[:,:,i]) for i in 1:N]
-
- - -
-10-element Array{DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePre
-cision{Float64},Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Noth
-ing,Nothing},1}:
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 1.06108 … 0.983561 1.03376; 1.0 0.821084 … 1.10629 0.906371
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 1.01413 … 1.00811 1.03162; 1.5 1.22868 … 1.39973 1.14803], 
-nothing, nothing, nothing, nothing)   
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.5 1.58801 … 2.21084 2.43589; 1.0 0.864317 … 0.549793 0.513679
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.5 1.51612 … 1.98545 2.15958; 1.5 1.29176 … 0.691421 0.630025]
-, nothing, nothing, nothing, nothing) 
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [0.5 0.531705 … 9.4006 8.83911; 1.0 0.77995 … 1.51708 2.82619], 
-nothing, nothing, nothing, nothing)   
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 1.11027 … 0.748814 0.768284; 0.5 0.411557 … 1.38829 1.10932
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [0.5 0.555757 … 9.10671 9.9217; 0.5 0.390449 … 0.455937 0.87881]
-, nothing, nothing, nothing, nothing) 
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [2.0 2.11239 … 3.47362 3.73014; 1.0 0.909749 … 0.765597 0.813086
-], nothing, nothing, nothing, nothing)
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [1.0 0.969326 … 1.01733 0.979306; 2.0 1.63445 … 2.07502 1.69807]
-, nothing, nothing, nothing, nothing) 
- DiffEqParamEstim.L2Loss{StepRangeLen{Float64,Base.TwicePrecision{Float64},
-Base.TwicePrecision{Float64}},Array{Float64,2},Nothing,Nothing,Nothing}(0.0
-:0.1:10.0, [2.0 1.92148 … 2.27938 2.43105; 2.0 1.80195 … 0.884827 0.82948],
- nothing, nothing, nothing, nothing)
-
- - -

So losses[i] is a function which computes the loss of a solution against the data of the ith trajectory. So to build our true loss function, we sum the losses:

- - -
-loss(sim) = sum(losses[i](sim[i]) for i in 1:N)
-
- - -
-loss (generic function with 1 method)
-
- - -

As a double check, make sure that loss(sim) outputs zero (since we generated the data from sim). Now we generate data with other parameters:

- - -
-prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8])
-function prob_func(prob,i,repeat)
-  ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)
-end
-monte_prob = MonteCarloProblem(prob,prob_func=prob_func)
-sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)
-loss(sim)
-
- - -
-10108.695792027418
-
- - -

and get a non-zero loss. So we now have our problem, our data, and our loss function... we have what we need.

-

Put this into buildlossobjective.

- - -
-obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,
-                           saveat=data_times)
-
- - -
-(::DiffEqParamEstim.DiffEqObjective{getfield(DiffEqParamEstim, Symbol("##29
-#34")){Nothing,Bool,Int64,typeof(DiffEqParamEstim.STANDARD_PROB_GENERATOR),
-Base.Iterators.Pairs{Symbol,Any,Tuple{Symbol,Symbol},NamedTuple{(:num_monte
-, :saveat),Tuple{Int64,StepRangeLen{Float64,Base.TwicePrecision{Float64},Ba
-se.TwicePrecision{Float64}}}}},DiffEqBase.MonteCarloProblem{DiffEqBase.ODEP
-roblem{Array{Float64,1},Tuple{Float64,Float64},true,Array{Float64,1},DiffEq
-Base.ODEFunction{true,typeof(Main.WeaveSandBox26.pf_func),LinearAlgebra.Uni
-formScaling{Bool},Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,Nothing,N
-othing,Nothing},Nothing,DiffEqBase.StandardODEProblem},typeof(Main.WeaveSan
-dBox26.prob_func),getfield(DiffEqBase, Symbol("##282#288")),getfield(DiffEq
-Base, Symbol("##284#290")),Array{Any,1}},OrdinaryDiffEq.Tsit5,typeof(Main.W
-eaveSandBox26.loss),Nothing},getfield(DiffEqParamEstim, Symbol("##33#39"))}
-) (generic function with 2 methods)
-
- - -

Notice that I added the kwargs for solve into this. They get passed to an internal solve command, so then the loss is computed on N trajectories at data_times.

-

Thus we take this objective function over to any optimization package. I like to do quick things in Optim.jl. Here, since the Lotka-Volterra equation requires positive parameters, I use Fminbox to make sure the parameters stay positive. I start the optimization with [1.3,0.9], and Optim spits out that the true parameters are:

- - -
-lower = zeros(2)
-upper = fill(2.0,2)
-result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.5000000000581937,1.0000000001633538]
- * Minimum: 7.119929e-16
- * Iterations: 4
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.07e-06 
-   * Stopped by an increasing objective: true
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 193
- * Gradient Calls: 193
-
- - - -
-result
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.5000000000581937,1.0000000001633538]
- * Minimum: 7.119929e-16
- * Iterations: 4
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.07e-06 
-   * Stopped by an increasing objective: true
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 193
- * Gradient Calls: 193
-
- - -

Optim finds one but not the other parameter.

-

I would run a test on synthetic data for your problem before using it on real data. Maybe play around with different optimization packages, or add regularization. You may also want to decrease the tolerance of the ODE solvers via

- - -
-obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,
-                           abstol=1e-8,reltol=1e-8,
-                           saveat=data_times)
-result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.500743476201907,1.001238477622136]
- * Minimum: 4.163900e-02
- * Iterations: 5
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.07e-06 
-   * Stopped by an increasing objective: false
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 224
- * Gradient Calls: 224
-
- - - -
-result
-
- - -
-Results of Optimization Algorithm
- * Algorithm: Fminbox with BFGS
- * Starting Point: [1.3,0.9]
- * Minimizer: [1.500743476201907,1.001238477622136]
- * Minimum: 4.163900e-02
- * Iterations: 5
- * Convergence: true
-   * |x - x'| ≤ 0.0e+00: true 
-     |x - x'| = 0.00e+00 
-   * |f(x) - f(x')| ≤ 0.0e+00 |f(x)|: true
-     |f(x) - f(x')| = 0.00e+00 |f(x)|
-   * |g(x)| ≤ 1.0e-08: false 
-     |g(x)| = 1.07e-06 
-   * Stopped by an increasing objective: false
-   * Reached Maximum Number of Iterations: false
- * Objective Calls: 224
- * Gradient Calls: 224
-
- - -

if you suspect error is the problem. However, if you're having problems it's most likely not the ODE solver tolerance and mostly because parameter inference is a very hard optimization problem.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("ode_extras","04-monte_carlo_parameter_estim.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/test.html b/html/test.html deleted file mode 100644 index ad839373..00000000 --- a/html/test.html +++ /dev/null @@ -1,743 +0,0 @@ - - - - - - Test - - - - - - - - - - - - - - - - - -
-
-
- -
-

Test

-
Chris Rackauckas
- -
- -

This is a test of the builder system.

- - -
-using DiffEqTutorials
-DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
-
- - - -

Appendix

-

These benchmarks are part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file(".","test.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.0
-Commit 80516ca202 (2019-01-21 21:24 UTC)
-Platform Info:
-  OS: Windows (x86_64-w64-mingw32)
-  CPU: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
-Environment:
-  JULIA_EDITOR = "C:\Users\accou\AppData\Local\atom\app-1.34.0\atom.exe" -a
-  JULIA_NUM_THREADS = 6
-
-
-

Package Information:

-
-
Status `C:\Users\accou\.julia\external\DiffEqTutorials.jl\Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.3.6
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 1.0.1
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 0.9.1
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.6.1
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.3.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.7.5
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.17.0
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.17.2
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.3.0
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.23.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.10.3
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.1.0
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.14.0
-[2a06ce6d-1589-592b-9c33-f37faeaed826] UnitfulPlots 0.0.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.7.2
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/type_handling/01-number_types.html b/html/type_handling/01-number_types.html deleted file mode 100644 index 866c76bb..00000000 --- a/html/type_handling/01-number_types.html +++ /dev/null @@ -1,968 +0,0 @@ - - - - - - Solving Equations in With Julia-Defined Types - - - - - - - - - - - - - - - - - -
-
-
- -
-

Solving Equations in With Julia-Defined Types

-
Chris Rackauckas
- -
- -

One of the nice things about DifferentialEquations.jl is that it is designed with Julia's type system in mind. What this means is, if you have properly defined a Number type, you can use this number type in DifferentialEquations.jl's algorithms! [Note that this is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as ODE.jl, Sundials.jl, and ODEInterface.jl are not compatible with some number systems.]

-

DifferentialEquations.jl determines the numbers to use in its solvers via the types that are designated by tspan and the initial condition of the problem. It will keep the time values in the same type as tspan, and the solution values in the same type as the initial condition. [Note that adaptive timestepping requires that the time type is compaible with sqrt and ^ functions. Thus dt cannot be Integer or numbers like that if adaptive timestepping is chosen].

-

Let's solve the linear ODE first define an easy way to get ODEProblems for the linear ODE:

- - -
-using DifferentialEquations
-f = (u,p,t) -> (p*u)
-prob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01);
-
- - - -

First let's solve it using Float64s. To do so, we just need to set u0 to a Float64 (which is done by the default) and dt should be a float as well.

- - -
-prob = prob_ode_linear
-sol =solve(prob,Tsit5())
-println(sol)
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: [0.0, 0.0996426, 0.345703, 0.677692, 1.0]
-u: [0.5, 0.552939, 0.708938, 0.99136, 1.3728]
-
- - -

Notice that both the times and the solutions were saved as Float64. Let's change the time to use rational values. Rationals are not compatible with adaptive time stepping since they do not have an L2 norm (this can be worked around by defining internalnorm, but rationals already explode in size!). To account for this, let's turn off adaptivity as well:

- - -
-prob = ODEProblem(f,1/2,(0//1,1//1),101//100);
-sol = solve(prob,RK4(),dt=1//2^(6),adaptive=false)
-println(sol)
-
- - -
-retcode: Success
-Interpolation: 3rd order Hermite
-t: Rational{Int64}[0//1, 1//64, 1//32, 3//64, 1//16, 5//64, 3//32, 7//64, 1
-//8, 9//64, 5//32, 11//64, 3//16, 13//64, 7//32, 15//64, 1//4, 17//64, 9//3
-2, 19//64, 5//16, 21//64, 11//32, 23//64, 3//8, 25//64, 13//32, 27//64, 7//
-16, 29//64, 15//32, 31//64, 1//2, 33//64, 17//32, 35//64, 9//16, 37//64, 19
-//32, 39//64, 5//8, 41//64, 21//32, 43//64, 11//16, 45//64, 23//32, 47//64,
- 3//4, 49//64, 25//32, 51//64, 13//16, 53//64, 27//32, 55//64, 7//8, 57//64
-, 29//32, 59//64, 15//16, 61//64, 31//32, 63//64, 1//1]
-u: [0.5, 0.507953, 0.516033, 0.524241, 0.53258, 0.541051, 0.549658, 0.55840
-1, 0.567283, 0.576306, 0.585473, 0.594786, 0.604247, 0.613858, 0.623623, 0.
-633542, 0.64362, 0.653857, 0.664258, 0.674824, 0.685558, 0.696463, 0.707541
-, 0.718795, 0.730229, 0.741844, 0.753644, 0.765632, 0.777811, 0.790183, 0.8
-02752, 0.815521, 0.828493, 0.841671, 0.855059, 0.86866, 0.882477, 0.896514,
- 0.910775, 0.925262, 0.93998, 0.954931, 0.970121, 0.985552, 1.00123, 1.0171
-5, 1.03333, 1.04977, 1.06647, 1.08343, 1.10067, 1.11817, 1.13596, 1.15403, 
-1.17239, 1.19103, 1.20998, 1.22923, 1.24878, 1.26864, 1.28882, 1.30932, 1.3
-3015, 1.35131, 1.3728]
-
- - -

Now let's do something fun. Let's change the solution to use Rational{BigInt} and print out the value at the end of the simulation. To do so, simply change the definition of the initial condition.

- - -
-prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100);
-sol =solve(prob,RK4(),dt=1//2^(6),adaptive=false)
-println(sol[end])
-
- - -
-415403291938655888343294424838034348376204408921988582429386196369066828013
-380062427154556444246064110042147806995712770513313913105317131993928991562
-472219540324173687134074558951938783349315387199475055050716642476760417033
-833225395963069751630544424879625010648869655282442577465289103178163815663
-464066572670655356269579471636764679863656649012559514171272038086748586891
-653145664881452891757769341753396504927956887980186316721217138912802907978
-839488971277351483679854338427632656105429434285170828205087679096886906512
-836058415177000071451519455149761416134211934766818795085616643778333812510
-724294609438512646808081849075509246961483574876752196687093709017376892988
-720208689912813268920171256693582145356856885176190731036088900945481923320
-301926151164642204512204346142796306783141982263276125756548530824427611816
-333393407861066935488564588880674178922907680658650707284447124975289884078
-283531881659241492248450685643985785207092880524994430296917090030308304496
-2139908567605824428891872081720287044135359380045755621121//302595526357001
-916401850227786985339805854374596312639728370747077589271270423243703004392
-074003302619884721642626495128918849830763359112247111187416392615737498981
-461087857422550657171300852094084580555857942985570738231419687525783564788
-285621871741725085612510228468354691202070954415518824737971685957295081128
-193794470230767667945336581432859330595785427486755359414346047520148998708
-472579747503225700773992946775819105236957926068135290787592745892648489231
-548275787132390564752450502531598102790376905344412549120000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000000000000000000000
-0000000000000000000000000000000000000000000
-
- - -

That's one huge fraction!

-

Other Compatible Number Types

-

BigFloats

- - -
-prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01))
-sol =solve(prob_ode_biglinear,Tsit5())
-println(sol[end])
-
- - -
-1.3728004409038087277892831823141155298533360144614213350145098661946611676
-11229
-
- - -

DoubleFloats.jl

-

There's are Float128-like types. Higher precision, but fixed and faster than arbitrary precision.

- - -
-using DoubleFloats
-prob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01))
-sol =solve(prob_ode_doublelinear,Tsit5())
-println(sol[end])
-
- - -
-1.3728004409038088
-
- - -

ArbFloats

-

These high precision numbers which are much faster than Bigs for less than 500-800 bits of accuracy.

- - -
-using ArbNumerics
-prob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01))
-sol =solve(prob_ode_arbfloatlinear,Tsit5())
-println(sol[end])
-
- - -
-1.372800440903808727789283182314
-
- - -

Incompatible Number Systems

-

DecFP.jl

-

Next let's try DecFP. DecFP is a fixed-precision decimals library which is made to give both performance but known decimals of accuracy. Having already installed DecFP with ]add DecFP, I can run the following:

- - -
-using DecFP
-prob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01))
-sol =solve(prob_ode_decfplinear,Tsit5())
-
- - -
-ERROR: StackOverflowError:
-
- - - -
-println(sol[end]); println(typeof(sol[end]))
-
- - -
-1.372800440903808727789283182314
-ArbNumerics.ArbFloat{128}
-
- - -

Decimals.jl

-

Install with ]add Decimals.

- - -
-using Decimals
-prob_ode_decimallinear = ODEProblem(f,[decimal("1.0")]./[decimal("2.0")],(0//1,1//1),decimal(1.01))
-sol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails
-
- - -
-ERROR: MethodError: Decimals.Decimal(::Rational{Int64}) is ambiguous. Candidates:
-  (::Type{T})(x::Rational{S}) where {S, T<:AbstractFloat} in Base at rational.jl:92
-  Decimals.Decimal(num::Real) in Decimals at /home/alex/.julia/packages/Decimals/Qfcas/src/decimal.jl:13
-Possible fix, define
-  Decimals.Decimal(::Rational{S})
-
- - - -
-println(sol[end]); println(typeof(sol[end]))
-
- - -
-1.372800440903808727789283182314
-ArbNumerics.ArbFloat{128}
-
- - -

At the time of writing this, Decimals are not compatible. This is not on DifferentialEquations.jl's end, it's on partly on Decimal's end since it is not a subtype of Number. Thus it's not recommended you use Decimals with DifferentialEquations.jl

-

Conclusion

-

As you can see, DifferentialEquations.jl can use arbitrary Julia-defined number systems in its arithmetic. If you need 128-bit floats, i.e. a bit more precision but not arbitrary, DoubleFloats.jl is a very good choice! For arbitrary precision, ArbNumerics are the most feature-complete and give great performance compared to BigFloats, and thus I recommend their use when high-precision (less than 512-800 bits) is required. DecFP is a great library for high-performance decimal numbers and works well as well. Other number systems could use some modernization.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("type_handling","01-number_types.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/type_handling/02-uncertainties.html b/html/type_handling/02-uncertainties.html deleted file mode 100644 index b73bb93c..00000000 --- a/html/type_handling/02-uncertainties.html +++ /dev/null @@ -1,985 +0,0 @@ - - - - - - Numbers with Uncertainties - - - - - - - - - - - - - - - - - -
-
-
- -
-

Numbers with Uncertainties

-
Mosè Giordano, Chris Rackauckas
- -
- -

The result of a measurement should be given as a number with an attached uncertainties, besides the physical unit, and all operations performed involving the result of the measurement should propagate the uncertainty, taking care of correlation between quantities.

-

There is a Julia package for dealing with numbers with uncertainties: Measurements.jl. Thanks to Julia's features, DifferentialEquations.jl easily works together with Measurements.jl out-of-the-box.

-

This notebook will cover some of the examples from the tutorial about classical Physics.

-

Caveat about Measurement type

-

Before going on with the tutorial, we must point up a subtlety of Measurements.jl that you should be aware of:

- - -
-using Measurements
-
-5.23 ± 0.14 === 5.23 ± 0.14
-
- - -
-false
-
- - - -
-(5.23± 0.14) - (5.23 ± 0.14)
-
- - -
-0.0 ± 0.2
-
- - - -
-(5.23 ± 0.14) / (5.23 ± 0.14)
-
- - -
-1.0 ± 0.038
-
- - -

The two numbers above, even though have the same nominal value and the same uncertainties, are actually two different measurements that only by chance share the same figures and their difference and their ratio have a non-zero uncertainty. It is common in physics to get very similar, or even equal, results for a repeated measurement, but the two measurements are not the same thing.

-

Instead, if you have one measurement and want to perform some operations involving it, you have to assign it to a variable:

- - -
-x = 5.23 ± 0.14
-x === x
-
- - -
-true
-
- - - -
-x - x
-
- - -
-0.0 ± 0.0
-
- - - -
-x / x
-
- - -
-1.0 ± 0.0
-
- - -

Radioactive Decay of Carbon-14

-

The rate of decay of carbon-14 is governed by a first order linear ordinary differential equation

-

\[ -\frac{\mathrm{d}u(t)}{\mathrm{d}t} = -\frac{u(t)}{\tau} -\]

-

where $\tau$ is the mean lifetime of carbon-14, which is related to the half-life $t_{1/2} = (5730 \pm 40)$ years by the relation $\tau = t_{1/2}/\ln(2)$.

- - -
-using DifferentialEquations, Measurements, Plots
-
-# Half-life and mean lifetime of radiocarbon, in years
-t_12 = 5730 ± 40
-τ = t_12 / log(2)
-
-#Setup
-u₀ = 1 ± 0
-tspan = (0.0, 10000.0)
-
-#Define the problem
-radioactivedecay(u,p,t) = - u / τ
-
-#Pass to solver
-prob = ODEProblem(radioactivedecay, u₀, tspan)
-sol = solve(prob, Tsit5(), reltol = 1e-8)
-
-# Analytic solution
-u = exp.(- sol.t / τ)
-
-plot(sol.t, sol.u, label = "Numerical", xlabel = "Years", ylabel = "Fraction of Carbon-14")
-plot!(sol.t, u, label = "Analytic")
-
- - - - -

The two curves are perfectly superimposed, indicating that the numerical solution matches the analytic one. We can check that also the uncertainties are correctly propagated in the numerical solution:

- - -
-println("Quantity of carbon-14 after ",  sol.t[11], " years:")
-
- - -
-Quantity of carbon-14 after 5207.522943669727 years:
-
- - - -
-println("Numerical: ", sol[11])
-
- - -
-Numerical: 0.5326215601698016 ± 0.002342211652124845
-
- - - -
-println("Analytic:  ", u[11])
-
- - -
-Analytic:  0.5326215594890371 ± 0.0023422116800320674
-
- - -

Both the value of the numerical solution and its uncertainty match the analytic solution within the requested tolerance. We can also note that close to 5730 years after the beginning of the decay (half-life of the radioisotope), the fraction of carbon-14 that survived is about 0.5.

-

Simple pendulum

-

Small angles approximation

-

The next problem we are going to study is the simple pendulum in the approximation of small angles. We address this simplified case because there exists an easy analytic solution to compare.

-

The differential equation we want to solve is

-

\[ -\ddot{\theta} + \frac{g}{L} \theta = 0 -\]

-

where $g = (9.79 \pm 0.02)~\mathrm{m}/\mathrm{s}^2$ is the gravitational acceleration measured where the experiment is carried out, and $L = (1.00 \pm 0.01)~\mathrm{m}$ is the length of the pendulum.

-

When you set up the problem for DifferentialEquations.jl remember to define the measurements as variables, as seen above.

- - -
-using DifferentialEquations, Measurements, Plots
-
-g = 9.79 ± 0.02; # Gravitational constants
-L = 1.00 ± 0.01; # Length of the pendulum
-
-#Initial Conditions
-u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle
-tspan = (0.0, 6.3)
-
-#Define the problem
-function simplependulum(du,u,p,t)
-    θ  = u[1]
-     = u[2]
-    du[1] = 
-    du[2] = -(g/L)*θ
-end
-
-#Pass to solvers
-prob = ODEProblem(simplependulum, u₀, tspan)
-sol = solve(prob, Tsit5(), reltol = 1e-6)
-
-# Analytic solution
-u = u₀[2] .* cos.(sqrt(g / L) .* sol.t)
-
-plot(sol.t, getindex.(sol.u, 2), label = "Numerical")
-plot!(sol.t, u, label = "Analytic")
-
- - - - -

Also in this case there is a perfect superimposition between the two curves, including their uncertainties.

-

We can also have a look at the difference between the two solutions:

- - -
-plot(sol.t, getindex.(sol.u, 2) .- u, label = "")
-
- - - - -

Arbitrary amplitude

-

Now that we know how to solve differential equations involving numbers with uncertainties we can solve the simple pendulum problem without any approximation. This time the differential equation to solve is the following:

-

\[ -\ddot{\theta} + \frac{g}{L} \sin(\theta) = 0 -\]

- - -
-g = 9.79 ± 0.02; # Gravitational constants
-L = 1.00 ± 0.01; # Length of the pendulum
-
-#Initial Conditions
-u₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle
-tspan = (0.0, 6.3)
-
-#Define the problem
-function simplependulum(du,u,p,t)
-    θ  = u[1]
-     = u[2]
-    du[1] = 
-    du[2] = -(g/L) * sin(θ)
-end
-
-#Pass to solvers
-prob = ODEProblem(simplependulum, u₀, tspan)
-sol = solve(prob, Tsit5(), reltol = 1e-6)
-
-plot(sol.t, getindex.(sol.u, 2), label = "Numerical")
-
- - - - -

We note that in this case the period of the oscillations is not constant.

- - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("type_handling","02-uncertainties.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/html/type_handling/03-unitful.html b/html/type_handling/03-unitful.html deleted file mode 100644 index 9d640186..00000000 --- a/html/type_handling/03-unitful.html +++ /dev/null @@ -1,887 +0,0 @@ - - - - - - Unit Checked Arithmetic via Unitful.jl - - - - - - - - - - - - - - - - - -
-
-
- -
-

Unit Checked Arithmetic via Unitful.jl

-
Chris Rackauckas
- -
- -

Units and dimensional analysis are standard tools across the sciences for checking the correctness of your equation. However, most ODE solvers only allow for the equation to be in dimensionless form, leaving it up to the user to both convert the equation to a dimensionless form, punch in the equations, and hopefully not make an error along the way.

-

DifferentialEquations.jl allows for one to use Unitful.jl to have unit-checked arithmetic natively in the solvers. Given the dispatch implementation of the Unitful, this has little overhead.

-

Using Unitful

-

To use Unitful, you need to have the package installed. Then you can add units to your variables. For example:

- - -
-using Unitful
-t = 1.0u"s"
-
- - -
-1.0 s
-
- - -

Notice that t is a variable with units in seconds. If we make another value with seconds, they can add

- - -
-t2 = 1.02u"s"
-t+t2
-
- - -
-2.02 s
-
- - -

and they can multiply:

- - -
-t*t2
-
- - -
-1.02 s^2
-
- - -

You can even do rational roots:

- - -
-sqrt(t)
-
- - -
-1.0 s^1/2
-
- - -

Many operations work. These operations will check to make sure units are correct, and will throw an error for incorrect operations:

- - -
-t + sqrt(t)
-
- - -
-ERROR: DimensionError: 1.0 s and 1.0 s^1/2 are not dimensionally compatible.
-
- - -

Using Unitful with DifferentialEquations.jl

-

Just like with other number systems, you can choose the units for your numbers by simply specifying the units of the initial condition and the timestep. For example, to solve the linear ODE where the variable has units of Newton's and t is in Seconds, we would use:

- - -
-using DifferentialEquations
-f = (y,p,t) -> 0.5*y
-u0 = 1.5u"N"
-prob = ODEProblem(f,u0,(0.0u"s",1.0u"s"))
-sol = solve(prob,Tsit5())
-
- - -
-ERROR: DimensionError: N s^-1 and 0.75 N are not dimensionally compatible.
-
- - -

Notice that we recieved a unit mismatch error. This is correctly so! Remember that for an ODE:

-

\[ -\frac{dy}{dt} = f(t,y) -\]

-

we must have that f is a rate, i.e. f is a change in y per unit time. So we need to fix the units of f in our example to be N/s. Notice that we then do not receive an error if we do the following:

- - -
-f = (y,p,t) -> 0.5*y/3.0u"s"
-prob = ODEProblem(f,u0,(0.0u"s",1.0u"s"))
-sol = solve(prob,Tsit5())
-
- - -
-retcode: Success
-Interpolation: specialized 4th order "free" interpolation
-t: 3-element Array{Unitful.Quantity{Float64,𝐓,Unitful.FreeUnits{(s,),𝐓,nothing}},1}:
-                 0.0 s
- 0.14311598261241779 s
-                 1.0 s
-u: 3-element Array{Unitful.Quantity{Float64,𝐋*𝐌*𝐓^-2,Unitful.FreeUnits{(N,),𝐋*𝐌*𝐓^-2,nothing}},1}:
-                1.5 N
- 1.5362091208988309 N
- 1.7720406194871121 N
-
- - -

This gives a a normal solution object. Notice that the values are all with the correct units:

- - -
-print(sol[:])
-
- - -
-Unitful.Quantity{Float64,𝐋*𝐌*𝐓^-2,Unitful.FreeUnits{(N,),𝐋*𝐌*𝐓^-2,nothing}}[1.5 N, 1.53621 N, 1.77204 N]
-
- - -

We can plot the solution by removing the units:

- - -
-using Plots
-gr()
-plot(ustrip(sol.t),ustrip(sol[:]),lw=3)
-
- - - - - -

Appendix

-

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

-
-

To locally run this tutorial, do the following commands:

-
using DiffEqTutorials
-DiffEqTutorials.weave_file("type_handling","03-unitful.jmd")
-
-

Computer Information:

-
-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
-Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
-  WORD_SIZE: 64
-  LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
-
-
-

Package Information:

-
-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
-[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
-
- - - -
- - - -
-
-
- - diff --git a/notebook/advanced/01-beeler_reuter.ipynb b/notebook/advanced/01-beeler_reuter.ipynb deleted file mode 100644 index 81e48c94..00000000 --- a/notebook/advanced/01-beeler_reuter.ipynb +++ /dev/null @@ -1,348 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model\n### Shahriar Iravanian\n\n## Background\n\n[JuliaDiffEq](https://github.com/JuliaDiffEq) is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). *JuliaDiffEq* provides a large number of explicit and implicit solvers suited for different types of ODE problems. It is possible to reduce a system of partial differential equations into an ODE problem by employing the [method of lines (MOL)](https://en.wikipedia.org/wiki/Method_of_lines). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. [Solving Systems of Stochastic PDEs and using GPUs in Julia](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/) is a brief introduction to MOL and using GPUs to accelerate PDE solving in *JuliaDiffEq*. 
Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl) libraries to run the explicit part of the model on a GPU.\n\nNote that this tutorial does not use the [higher order IMEX methods built into DifferentialEquations.jl](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1) but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios.\n\nThere are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic [Hodgkin-Huxley model](https://en.wikipedia.org/wiki/Hodgkin%E2%80%93Huxley_model) and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE,\n\n$$\\partial V / \\partial t = \\nabla (D \\nabla V) - \\frac {I_\\text{ion}} {C_m},$$\n\nwhere $V$ is the transmembrane potential, $D$ is a diffusion tensor, $I_\\text{ion}$ is the sum of the transmembrane currents and is calculated from the ODEs, and $C_m$ is the membrane capacitance and is usually assumed to be constant. Here we model a uniform and isotropic medium. Therefore, the model can be simplified to,\n\n$$\\partial V / \\partial t = D \\Delta{V} - \\frac {I_\\text{ion}} {C_m},$$\n\nwhere $D$ is now a scalar. By nature, these models have to deal with different time scales and are therefore classified as *stiff*. 
Commonly, they are solved using the explicit Euler method, usually with a closed form for the integration of the gating variables (the Rush-Larsen method, see below). We can also solve these problems using implicit or semi-implicit PDE solvers (e.g., the [Crank-Nicholson method](https://en.wikipedia.org/wiki/Crank%E2%80%93Nicolson_method) combined with an iterative solver). Higher order explicit methods such as Runge-Kutta and linear multi-step methods cannot overcome the stiffness and are not particularly helpful.\n\nIn this tutorial, we first develop a CPU-only IMEX solver and then show how to move the explicit part to a GPU.\n\n### The Beeler-Reuter Model\n\nWe have chosen the [Beeler-Reuter ventricular ionic model](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1283659/) as our example. It is a classic model first described in 1977 and is used as a base for many other ionic models. It has eight state variables, which makes it complicated enough to be interesting without obscuring the main points of the exercise. The eight state variables are: the transmembrane potential ($V$), sodium-channel activation and inactivation gates ($m$ and $h$, similar to the Hodgkin-Huxley model), with an additional slow inactivation gate ($j$), calcium-channel activation and deactivations gates ($d$ and $f$), a time-dependent inward-rectifying potassium current gate ($x_1$), and intracellular calcium concentration ($c$). There are four currents: a sodium current ($i_{Na}$), a calcium current ($i_{Ca}$), and two potassium currents, one time-dependent ($i_{x_1}$) and one background time-independent ($i_{K_1}$).\n\n## CPU-Only Beeler-Reuter Solver\n\nLet's start by developing a CPU only IMEX solver. The main idea is to use the *DifferentialEquations* framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. 
If no analytical approximation was known for the explicit part, one could use methods from [this list](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1).\n\nFirst, we define the model constants:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "const v0 = -84.624\nconst v1 = 10.0\nconst C_K1 = 1.0f0\nconst C_x1 = 1.0f0\nconst C_Na = 1.0f0\nconst C_s = 1.0f0\nconst D_Ca = 0.0f0\nconst D_Na = 0.0f0\nconst g_s = 0.09f0\nconst g_Na = 4.0f0\nconst g_NaC = 0.005f0\nconst ENa = 50.0f0 + D_Na\nconst γ = 0.5f0\nconst C_m = 1.0f0" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that the constants are defined as `Float32` and not `Float64`. The reason is that most GPUs have many more single precision cores than double precision ones. To ensure uniformity between CPU and GPU, we also code most states variables as `Float32` except for the transmembrane potential, which is solved by an implicit solver provided by the Sundial library and needs to be `Float64`.\n\n### The State Structure\n\nNext, we define a struct to contain our state. `BeelerReuterCpu` is a functor and we will define a deriv function as its associated function." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "mutable struct BeelerReuterCpu <: Function\n t::Float64 # the last timestep time to calculate Δt\n diff_coef::Float64 # the diffusion-coefficient (coupling strength)\n\n C::Array{Float32, 2} # intracellular calcium concentration\n M::Array{Float32, 2} # sodium current activation gate (m)\n H::Array{Float32, 2} # sodium current inactivation gate (h)\n J::Array{Float32, 2} # sodium current slow inactivaiton gate (j)\n D::Array{Float32, 2} # calcium current activaiton gate (d)\n F::Array{Float32, 2} # calcium current inactivation gate (f)\n XI::Array{Float32, 2} # inward-rectifying potassium current (iK1)\n\n Δu::Array{Float64, 2} # place-holder for the Laplacian\n\n function BeelerReuterCpu(u0, diff_coef)\n self = new()\n\n ny, nx = size(u0)\n self.t = 0.0\n self.diff_coef = diff_coef\n\n self.C = fill(0.0001f0, (ny,nx))\n self.M = fill(0.01f0, (ny,nx))\n self.H = fill(0.988f0, (ny,nx))\n self.J = fill(0.975f0, (ny,nx))\n self.D = fill(0.003f0, (ny,nx))\n self.F = fill(0.994f0, (ny,nx))\n self.XI = fill(0.0001f0, (ny,nx))\n\n self.Δu = zeros(ny,nx)\n\n return self\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Laplacian\n\nThe finite-difference Laplacian is calculated in-place by a 5-point stencil. The Neumann boundary condition is enforced. Note that we could have also used [DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to automate this step." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# 5-point stencil\nfunction laplacian(Δu, u)\n n1, n2 = size(u)\n\n # internal nodes\n for j = 2:n2-1\n for i = 2:n1-1\n @inbounds Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j]\n end\n end\n\n # left/right edges\n for i = 2:n1-1\n @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1]\n @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2]\n end\n\n # top/bottom edges\n for j = 2:n2-1\n @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j]\n @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j]\n end\n\n # corners\n @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1]\n @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1]\n @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2]\n @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### The Rush-Larsen Method\n\nWe use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the [IMEX solvers documentation](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-%28IMEX%29-ODE-1). 
While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest.\n\nThe [Rush-Larsen](https://ieeexplore.ieee.org/document/4122859/) method replaces the explicit Euler integration for the gating variables with direct integration. The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs,\n\n$$\\frac{dg}{dt} = \\alpha(V) (1 - g) - \\beta(V) g$$\n\nwhere $g$ is a generic gating variable, ranging from 0 to 1, and $\\alpha$ and $\\beta$ are reaction rates. This equation can be written as,\n\n$$\\frac{dg}{dt} = (g_{\\infty} - g) / \\tau_g,$$\n\nwhere $g_\\infty$ and $\\tau_g$ are\n\n$$g_{\\infty} = \\frac{\\alpha}{(\\alpha + \\beta)},$$\n\nand,\n\n$$\\tau_g = \\frac{1}{(\\alpha + \\beta)}.$$\n\nAssuing that $g_\\infty$ and $\\tau_g$ are constant for the duration of a single time step ($\\Delta{t}$), which is a reasonable assumption for most cardiac models, we can integrate directly to have,\n\n$$g(t + \\Delta{t}) = g_{\\infty} - \\left(g_{\\infty} - g(\\Delta{t})\\right)\\,e^{-\\Delta{t}/\\tau_g}.$$\n\nThis is the Rush-Larsen technique. Note that as $\\Delta{t} \\rightarrow 0$, this equations morphs into the explicit Euler formula,\n\n$$g(t + \\Delta{t}) = g(t) + \\Delta{t}\\frac{dg}{dt}.$$\n\n`rush_larsen` is a helper function that use the Rush-Larsen method to integrate the gating variables." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@inline function rush_larsen(g, α, β, Δt)\n inf = α/(α+β)\n τ = 1f0 / (α+β)\n return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The gating variables are updated as below. The details of how to calculate $\\alpha$ and $\\beta$ are based on the Beeler-Reuter model and not of direct interest to this tutorial." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_M_cpu(g, v, Δt)\n # the condition is needed here to prevent NaN when v == 47.0\n α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0)\n β = (40.0f0 * exp(-0.056f0*(v+72.0f0)))\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_H_cpu(g, v, Δt)\n α = 0.126f0 * exp(-0.25f0*(v+77.0f0))\n β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_J_cpu(g, v, Δt)\n α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0)\n β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_D_cpu(g, v, Δt)\n α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0)\n β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_F_cpu(g, v, Δt)\n α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0)\n β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend\n\nfunction update_XI_cpu(g, v, Δt)\n α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0)\n β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0)\n return rush_larsen(g, α, β, Δt)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The intracelleular calcium is not technically a gating variable, but we can use a similar explicit exponential integrator for it." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_C_cpu(g, d, f, v, Δt)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g)\n kCa = C_s * g_s * d * f\n iCa = kCa * (v - ECa)\n inf = 1.0f-7 * (0.07f0 - g)\n τ = 1f0 / 0.07f0\n return g + (g - inf) * expm1(-Δt/τ)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Implicit Solver\n\nNow, it is time to define the derivative function as an associated function of **BeelerReuterCpu**. We plan to use the CVODE_BDF solver as our implicit portion. Similar to other iterative methods, it calls the deriv function with the same $t$ multiple times. For example, these are consecutive $t$s from a representative run:\n\n0.86830\n0.86830\n0.85485\n0.85485\n0.85485\n0.86359\n0.86359\n0.86359\n0.87233\n0.87233\n0.87233\n0.88598\n...\n\nHere, every time step is called three times. We distinguish between two types of calls to the deriv function. When $t$ changes, the gating variables are updated by calling `update_gates_cpu`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt)\n let Δt = Float32(Δt)\n n1, n2 = size(u)\n for j = 1:n2\n for i = 1:n1\n v = Float32(u[i,j])\n\n XI[i,j] = update_XI_cpu(XI[i,j], v, Δt)\n M[i,j] = update_M_cpu(M[i,j], v, Δt)\n H[i,j] = update_H_cpu(H[i,j], v, Δt)\n J[i,j] = update_J_cpu(J[i,j], v, Δt)\n D[i,j] = update_D_cpu(D[i,j], v, Δt)\n F[i,j] = update_F_cpu(F[i,j], v, Δt)\n\n C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt)\n end\n end\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "On the other hand, du is updated at each time step, since it is independent of $\\Delta{t}$." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# iK1 is the inward-rectifying potassium current\nfunction calc_iK1(v)\n ea = exp(0.04f0*(v+85f0))\n eb = exp(0.08f0*(v+53f0))\n ec = exp(0.04f0*(v+53f0))\n ed = exp(-0.04f0*(v+23f0))\n return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)\n + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))\nend\n\n# ix1 is the time-independent background potassium current\nfunction calc_ix1(v, xi)\n ea = exp(0.04f0*(v+77f0))\n eb = exp(0.04f0*(v+35f0))\n return xi * 0.8f0 * (ea-1f0) / eb\nend\n\n# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)\nfunction calc_iNa(v, m, h, j)\n return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)\nend\n\n# iCa is the calcium current\nfunction calc_iCa(v, d, f, c)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c) # ECa is the calcium reversal potential\n return C_s * g_s * d * f * (v - ECa)\nend\n\nfunction update_du_cpu(du, u, XI, M, H, J, D, F, C)\n n1, n2 = size(u)\n\n for j = 1:n2\n for i = 1:n1\n v = Float32(u[i,j])\n\n # calculating individual currents\n iK1 = calc_iK1(v)\n ix1 = calc_ix1(v, XI[i,j])\n iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])\n iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])\n\n # total current\n I_sum = iK1 + ix1 + iNa + iCa\n\n # the reaction part of the reaction-diffusion equation\n du[i,j] = -I_sum / C_m\n end\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Finally, we put everything together is our deriv function, which is a call on `BeelerReuterCpu`." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function (f::BeelerReuterCpu)(du, u, p, t)\n Δt = t - f.t\n\n if Δt != 0 || t == 0\n update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt)\n f.t = t\n end\n\n laplacian(f.Δu, u)\n\n # calculate the reaction portion\n update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C)\n\n # ...add the diffusion portion\n du .+= f.diff_coef .* f.Δu\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Results\n\nTime to test! We need to define the starting transmembrane potential with the help of global constants **v0** and **v1**, which represent the resting and activated potentials." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "const N = 192;\nu0 = fill(v0, (N, N));\nu0[90:102,90:102] .= v1; # a small square in the middle of the domain" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The initial condition is a small square in the middle of the domain." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots\nheatmap(u0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Next, the problem is defined:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Sundials\n\nderiv_cpu = BeelerReuterCpu(u0, 1.0);\nprob = ODEProblem(deriv_cpu, u0, (0.0, 50.0));" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "For stiff reaction-diffusion equations, CVODE_BDF from Sundial library is an excellent solver." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "heatmap(sol.u[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## CPU/GPU Beeler-Reuter Solver\n\nGPUs are great for embarrassingly parallel problems but not so much for highly coupled models. We plan to keep the implicit part on CPU and run the decoupled explicit code on a GPU with the help of the CUDAnative library.\n\n### GPUs and CUDA\n\nIt this section, we present a brief summary of how GPUs (specifically NVIDIA GPUs) work and how to program them using the Julia CUDA interface. The readers who are familiar with these basic concepts may skip this section.\n\nLet's start by looking at the hardware of a typical high-end GPU, GTX 1080. It has four Graphics Processing Clusters (equivalent to a discrete CPU), each harboring five Streaming Multiprocessor (similar to a CPU core). Each SM has 128 single-precision CUDA cores. Therefore, GTX 1080 has a total of 4 x 5 x 128 = 2560 CUDA cores. The maximum theoretical throughput for a GTX 1080 is reported as 8.87 TFLOPS. This figure is calculated for a boost clock frequency of 1.733 MHz as 2 x 2560 x 1.733 MHz = 8.87 TFLOPS. The factor 2 is included because two single floating point operations, a multiplication and an addition, can be done in a clock cycle as part of a fused-multiply-addition FMA operation. GTX 1080 also has 8192 MB of global memory accessible to all the cores (in addition to local and shared memory on each SM).\n\nA typical CUDA application has the following flow:\n\n1. Define and initialize the problem domain tensors (multi-dimensional arrays) in CPU memory.\n2. Allocate corresponding tensors in the GPU global memory.\n3. 
Transfer the input tensors from CPU to the corresponding GPU tensors.\n4. Invoke CUDA kernels (i.e., the GPU functions callable from CPU) that operate on the GPU tensors.\n5. Transfer the result tensors from GPU back to CPU.\n6. Process tensors on CPU.\n7. Repeat steps 3-6 as needed.\n\nSome libraries, such as [ArrayFire](https://github.com/arrayfire/arrayfire), hide the complexicities of steps 2-5 behind a higher level of abstraction. However, here we take a lower level route. By using [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl), we achieve a finer-grained control and higher performance. In return, we need to implement each step manually.\n\n*CuArray* is a thin abstraction layer over the CUDA API and allows us to define GPU-side tensors and copy data to and from them but does not provide for operations on tensors. *CUDAnative* is a compiler that translates Julia functions designated as CUDA kernels into ptx (a high-level CUDA assembly language).\n\n### The CUDA Code\n\nThe key to fast CUDA programs is to minimize CPU/GPU memory transfers and global memory accesses. The implicit solver is currently CPU only, but it only needs access to the transmembrane potential. The rest of state variables reside on the GPU memory.\n\nWe modify ``BeelerReuterCpu`` into ``BeelerReuterGpu`` by defining the state variables as *CuArray*s instead of standard Julia *Array*s. The name of each variable defined on GPU is prefixed by *d_* for clarity. Note that $\\Delta{v}$ is a temporary storage for the Laplacian and stays on the CPU side." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using CUDAnative, CuArrays\n\nmutable struct BeelerReuterGpu <: Function\n t::Float64 # the last timestep time to calculate Δt\n diff_coef::Float64 # the diffusion-coefficient (coupling strength)\n\n d_C::CuArray{Float32, 2} # intracellular calcium concentration\n d_M::CuArray{Float32, 2} # sodium current activation gate (m)\n d_H::CuArray{Float32, 2} # sodium current inactivation gate (h)\n d_J::CuArray{Float32, 2} # sodium current slow inactivaiton gate (j)\n d_D::CuArray{Float32, 2} # calcium current activaiton gate (d)\n d_F::CuArray{Float32, 2} # calcium current inactivation gate (f)\n d_XI::CuArray{Float32, 2} # inward-rectifying potassium current (iK1)\n\n d_u::CuArray{Float64, 2} # place-holder for u in the device memory\n d_du::CuArray{Float64, 2} # place-holder for d_u in the device memory\n\n Δv::Array{Float64, 2} # place-holder for voltage gradient\n\n function BeelerReuterGpu(u0, diff_coef)\n self = new()\n\n ny, nx = size(u0)\n @assert (nx % 16 == 0) && (ny % 16 == 0)\n self.t = 0.0\n self.diff_coef = diff_coef\n\n self.d_C = CuArray(fill(0.0001f0, (ny,nx)))\n self.d_M = CuArray(fill(0.01f0, (ny,nx)))\n self.d_H = CuArray(fill(0.988f0, (ny,nx)))\n self.d_J = CuArray(fill(0.975f0, (ny,nx)))\n self.d_D = CuArray(fill(0.003f0, (ny,nx)))\n self.d_F = CuArray(fill(0.994f0, (ny,nx)))\n self.d_XI = CuArray(fill(0.0001f0, (ny,nx)))\n\n self.d_u = CuArray(u0)\n self.d_du = CuArray(zeros(ny,nx))\n\n self.Δv = zeros(ny,nx)\n\n return self\n end\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The Laplacian function remains unchanged. The main change to the explicit gating solvers is that *exp* and *expm1* functions are prefixed by *CUDAnative.*. This is a technical nuisance that will hopefully be resolved in future." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function rush_larsen_gpu(g, α, β, Δt)\n inf = α/(α+β)\n τ = 1.0/(α+β)\n return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0)\nend\n\nfunction update_M_gpu(g, v, Δt)\n # the condition is needed here to prevent NaN when v == 47.0\n α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0)\n β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0)))\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_H_gpu(g, v, Δt)\n α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0))\n β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_J_gpu(g, v, Δt)\n α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0)\n β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_D_gpu(g, v, Δt)\n α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0)\n β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_F_gpu(g, v, Δt)\n α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0)\n β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_XI_gpu(g, v, Δt)\n α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0)\n β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0)\n return rush_larsen_gpu(g, α, β, Δt)\nend\n\nfunction update_C_gpu(c, d, f, v, Δt)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c)\n kCa = C_s * g_s * d * f\n iCa = kCa * (v - ECa)\n inf = 1.0f-7 * (0.07f0 - c)\n τ = 1f0 / 0.07f0\n return c + 
(c - inf) * CUDAnative.expm1(-Δt/τ)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Similarly, we modify the functions to calculate the individual currents by adding CUDAnative prefix." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# iK1 is the inward-rectifying potassium current\nfunction calc_iK1(v)\n ea = CUDAnative.exp(0.04f0*(v+85f0))\n eb = CUDAnative.exp(0.08f0*(v+53f0))\n ec = CUDAnative.exp(0.04f0*(v+53f0))\n ed = CUDAnative.exp(-0.04f0*(v+23f0))\n return 0.35f0 * (4f0*(ea-1f0)/(eb + ec)\n + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed)))\nend\n\n# ix1 is the time-independent background potassium current\nfunction calc_ix1(v, xi)\n ea = CUDAnative.exp(0.04f0*(v+77f0))\n eb = CUDAnative.exp(0.04f0*(v+35f0))\n return xi * 0.8f0 * (ea-1f0) / eb\nend\n\n# iNa is the sodium current (similar to the classic Hodgkin-Huxley model)\nfunction calc_iNa(v, m, h, j)\n return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa)\nend\n\n# iCa is the calcium current\nfunction calc_iCa(v, d, f, c)\n ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) # ECa is the calcium reversal potential\n return C_s * g_s * d * f * (v - ECa)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### CUDA Kernels\n\nA CUDA program does not directly deal with GPCs and SMs. The logical view of a CUDA program is in the term of *blocks* and *threads*. We have to specify the number of block and threads when running a CUDA *kernel*. Each thread runs on a single CUDA core. Threads are logically bundled into blocks, which are in turn specified on a grid. The grid stands for the entirety of the domain of interest.\n\nEach thread can find its logical coordinate by using few pre-defined indexing variables (*threadIdx*, *blockIdx*, *blockDim* and *gridDim*) in C/C++ and the corresponding functions (e.g., `threadIdx()`) in Julia. 
There variables and functions are defined automatically for each thread and may return a different value depending on the calling thread. The return value of these functions is a 1, 2, or 3 dimensional structure whose elements can be accessed as `.x`, `.y`, and `.z` (for a 1-dimensional case, `.x` reports the actual index and `.y` and `.z` simply return 1). For example, if we deploy a kernel in 128 blocks and with 256 threads per block, each thread will see\n\n```\n gridDim.x = 128;\n blockDim=256;\n```\n\nwhile `blockIdx.x` ranges from 0 to 127 in C/C++ and 1 to 128 in Julia. Similarly, `threadIdx.x` will be between 0 to 255 in C/C++ (of course, in Julia the range will be 1 to 256).\n\nA C/C++ thread can calculate its index as\n\n```\n int idx = blockDim.x * blockIdx.x + threadIdx.x;\n```\n\nIn Julia, we have to take into account base 1. Therefore, we use the following formula\n\n```\n idx = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x\n```\n\nA CUDA programmer is free to interpret the calculated index however it fits the application, but in practice, it is usually interpreted as an index into input tensors.\n\nIn the GPU version of the solver, each thread works on a single element of the medium, indexed by a (x,y) pair.\n`update_gates_gpu` and `update_du_gpu` are very similar to their CPU counterparts but are in fact CUDA kernels where the *for* loops are replaced with CUDA specific indexing. Note that CUDA kernels cannot return a valve; hence, *nothing* at the end." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt)\n i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x\n j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y\n\n v = Float32(u[i,j])\n\n let Δt = Float32(Δt)\n XI[i,j] = update_XI_gpu(XI[i,j], v, Δt)\n M[i,j] = update_M_gpu(M[i,j], v, Δt)\n H[i,j] = update_H_gpu(H[i,j], v, Δt)\n J[i,j] = update_J_gpu(J[i,j], v, Δt)\n D[i,j] = update_D_gpu(D[i,j], v, Δt)\n F[i,j] = update_F_gpu(F[i,j], v, Δt)\n\n C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt)\n end\n nothing\nend\n\nfunction update_du_gpu(du, u, XI, M, H, J, D, F, C)\n i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x\n j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y\n\n v = Float32(u[i,j])\n\n # calculating individual currents\n iK1 = calc_iK1(v)\n ix1 = calc_ix1(v, XI[i,j])\n iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j])\n iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j])\n\n # total current\n I_sum = iK1 + ix1 + iNa + iCa\n\n # the reaction part of the reaction-diffusion equation\n du[i,j] = -I_sum / C_m\n nothing\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Implicit Solver\n\nFinally, the deriv function is modified to copy *u* to GPU and copy *du* back and to invoke CUDA kernels." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function (f::BeelerReuterGpu)(du, u, p, t)\n L = 16 # block size\n Δt = t - f.t\n copyto!(f.d_u, u)\n ny, nx = size(u)\n\n if Δt != 0 || t == 0\n @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu(\n f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt)\n f.t = t\n end\n\n laplacian(f.Δv, u)\n\n # calculate the reaction portion\n @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu(\n f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C)\n\n copyto!(du, f.d_du)\n\n # ...add the diffusion portion\n du .+= f.diff_coef .* f.Δv\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Ready to test!" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Sundials\n\nderiv_gpu = BeelerReuterGpu(u0, 1.0);\nprob = ODEProblem(deriv_gpu, u0, (0.0, 50.0));\n@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "heatmap(sol.u[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Summary\n\nWe achieve around a 6x speedup with running the explicit portion of our IMEX solver on a GPU. The major bottleneck of this technique is the communication between CPU and GPU. In its current form, not all of the internals of the method utilize GPU acceleration. In particular, the implicit equations solved by GMRES are performed on the CPU. This partial CPU nature also increases the amount of data transfer that is required between the GPU and CPU (performed every f call). Compiling the full ODE solver to the GPU would solve both of these issues and potentially give a much larger speedup. 
[JuliaDiffEq developers are currently working on solutions to alleviate these issues](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/), but these will only be compatible with native Julia solvers (and not Sundials)." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/advanced/02-advanced_ODE_solving.ipynb b/notebook/advanced/02-advanced_ODE_solving.ipynb deleted file mode 100644 index 3e01a8ee..00000000 --- a/notebook/advanced/02-advanced_ODE_solving.ipynb +++ /dev/null @@ -1,403 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Solving Stiff Equations\n### Chris Rackauckas\n\nThis tutorial is for getting into the extra features for solving stiff ordinary\ndifferential equations in an efficient manner. Solving stiff ordinary\ndifferential equations requires specializing the linear solver on properties of\nthe Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2)\nback-solves. Note that these same functions and controls also extend to stiff\nSDEs, DDEs, DAEs, etc.\n\n## Code Optimization for Differential Equations\n\n### Writing Efficient Code\n\nFor a detailed tutorial on how to optimize one's DifferentialEquations.jl code,\nplease see the\n[Optimizing DiffEq Code tutorial](http://tutorials.juliadiffeq.org/html/introduction/03-optimizing_diffeq_code.html).\n\n### Choosing a Good Solver\n\nChoosing a good solver is required for getting top notch speed. 
General\nrecommendations can be found on the solver page (for example, the\n[ODE Solver Recommendations](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html)).\nThe current recommendations can be simplified to a Rosenbrock method\n(`Rosenbrock23` or `Rodas5`) for smaller (<50 ODEs) problems, ESDIRK methods\nfor slightly larger (`TRBDF2` or `KenCarp4` for <2000 ODEs), and Sundials\n`CVODE_BDF` for even larger problems. `lsoda` from\n[LSODA.jl](https://github.com/rveltz/LSODA.jl) is generally worth a try.\n\nMore details on the solver to choose can be found by benchmarking. See the\n[DiffEqBenchmarks](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) to\ncompare many solvers on many problems.\n\n### Check Out the Speed FAQ\n\nSee [this FAQ](http://docs.juliadiffeq.org/latest/basics/faq.html#Performance-1)\nfor information on common pitfalls and how to improve performance.\n\n### Setting Up Your Julia Installation for Speed\n\nJulia uses an underlying BLAS implementation for its matrix multiplications\nand factorizations. This library is automatically multithreaded and accelerates\nthe internal linear algebra of DifferentialEquations.jl. However, for optimality,\nyou should make sure that the number of BLAS threads that you are using matches\nthe number of physical cores and not the number of logical cores. 
See\n[this issue for more details](https://github.com/JuliaLang/julia/issues/33409).\n\nTo check the number of BLAS threads, use:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If I want to set this directly to 4 threads, I would use:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using LinearAlgebra\nLinearAlgebra.BLAS.set_num_threads(4)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Additionally, in some cases Intel's MKL might be a faster BLAS than the standard\nBLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you\ncan use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) which will accelerate\nthe linear algebra routines. Please see the package for the limitations.\n\n### Use Accelerator Hardware\n\nWhen possible, use GPUs. If your ODE system is small and you need to solve it\nwith very many different parameters, see the\n[ensembles interface](http://docs.juliadiffeq.org/latest/features/ensemble.html)\nand [DiffEqGPU.jl](https://github.com/JuliaDiffEq/DiffEqGPU.jl). If your problem\nis large, consider using a [CuArray](https://github.com/JuliaGPU/CuArrays.jl)\nfor the state to allow for GPU-parallelism of the internal linear algebra.\n\n## Speeding Up Jacobian Calculations\n\nWhen one is using an implicit or semi-implicit differential equation solver,\nthe Jacobian must be built at many iterations and this can be one of the most\nexpensive steps. There are two pieces that must be optimized in order to reach\nmaximal efficiency when solving stiff equations: the sparsity pattern and the\nconstruction of the Jacobian. 
The construction is filling the matrix\n`J` with values, while the sparsity pattern is what `J` to use.\n\nThe sparsity pattern is given by a prototype matrix, the `jac_prototype`, which\nwill be copied to be used as `J`. The default is for `J` to be a `Matrix`,\ni.e. a dense matrix. However, if you know the sparsity of your problem, then\nyou can pass a different matrix type. For example, a `SparseMatrixCSC` will\ngive a sparse matrix. Additionally, structured matrix types like `Tridiagonal`,\n`BandedMatrix` (from\n[BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)),\n`BlockBandedMatrix` (from\n[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)),\nand more can be given. DifferentialEquations.jl will internally use this matrix\ntype, making the factorizations faster by utilizing the specialized forms.\n\nFor the construction, there are 3 ways to fill `J`:\n\n- The default, which uses normal finite/automatic differentiation\n- A function `jac(J,u,p,t)` which directly computes the values of `J`\n- A `colorvec` which defines a sparse differentiation scheme.\n\nWe will now showcase how to make use of this functionality with growing complexity.\n\n### Declaring Jacobian Functions\n\nLet's solve the Rosenbrock equations:\n\n$$\\begin{align}\ndy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\\\\ndy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\\\\ndy_3 &= 3*10^7 y_{3}^2 \\\\\n\\end{align}$$\n\nIn order to reduce the Jacobian construction cost, one can describe a Jacobian\nfunction by using the `jac` argument for the `ODEFunction`. 
First, let's do\na standard `ODEProblem`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nfunction rober(du,u,p,t)\n y₁,y₂,y₃ = u\n k₁,k₂,k₃ = p\n du[1] = -k₁*y₁+k₃*y₂*y₃\n du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃\n du[3] = k₂*y₂^2\n nothing\nend\nprob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))\nsol = solve(prob,Rosenbrock23())\n\nusing Plots\nplot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using BenchmarkTools\n@btime solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we want to add the Jacobian. First we have to derive the Jacobian\n$\\frac{df_i}{du_j}$ which is `J[i,j]`. From this we get:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function rober_jac(J,u,p,t)\n y₁,y₂,y₃ = u\n k₁,k₂,k₃ = p\n J[1,1] = k₁ * -1\n J[2,1] = k₁\n J[3,1] = 0\n J[1,2] = y₃ * k₃\n J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1\n J[3,2] = y₂ * 2 * k₂\n J[1,3] = k₃ * y₂\n J[2,3] = k₃ * y₂ * -1\n J[3,3] = 0\n nothing\nend\nf = ODEFunction(rober, jac=rober_jac)\nprob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))\n\n@btime solve(prob_jac)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Automatic Derivation of Jacobian Functions\n\nBut that was hard! If you want to take the symbolic Jacobian of numerical\ncode, we can make use of [ModelingToolkit.jl](https://github.com/JuliaDiffEq/ModelingToolkit.jl)\nto symbolicify the numerical code and do the symbolic calculation and return\nthe Julia code for this." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using ModelingToolkit\nde = modelingtoolkitize(prob)\nModelingToolkit.generate_jacobian(de...)[2] # Second is in-place" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "which outputs:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - ":((##MTIIPVar#376, u, p, t)->begin\n #= C:\\Users\\accou\\.julia\\packages\\ModelingToolkit\\czHtj\\src\\utils.jl:65 =#\n #= C:\\Users\\accou\\.julia\\packages\\ModelingToolkit\\czHtj\\src\\utils.jl:66 =#\n let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3])\n ##MTIIPVar#376[1] = α₁ * -1\n ##MTIIPVar#376[2] = α₁\n ##MTIIPVar#376[3] = 0\n ##MTIIPVar#376[4] = x₃ * α₃\n ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1\n ##MTIIPVar#376[6] = x₂ * 2 * α₂\n ##MTIIPVar#376[7] = α₃ * x₂\n ##MTIIPVar#376[8] = α₃ * x₂ * -1\n ##MTIIPVar#376[9] = 0\n end\n #= C:\\Users\\accou\\.julia\\packages\\ModelingToolkit\\czHtj\\src\\utils.jl:67 =#\n nothing\n end)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's use that to give the analytical solution Jacobian:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "jac = eval(ModelingToolkit.generate_jacobian(de...)[2])\nf = ODEFunction(rober, jac=jac)\nprob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Declaring a Sparse Jacobian\n\nJacobian sparsity is declared by the `jac_prototype` argument in the `ODEFunction`.\nNote that you should only do this if the sparsity is high, for example, 0.1%\nof the matrix is non-zeros, otherwise the overhead of sparse matrices can be higher\nthan the gains from sparse differentiation!\n\nBut as a demonstration, let's build a sparse matrix for the Rober problem. 
We\ncan do this by gathering the `I` and `J` pairs for the non-zero components, like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "I = [1,2,1,2,3,1,2]\nJ = [1,1,2,2,2,3,3]\nusing SparseArrays\njac_prototype = sparse(I,J,1.0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now this is the sparse matrix prototype that we want to use in our solver, which\nwe then pass like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype)\nprob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Automatic Sparsity Detection\n\nOne of the useful companion tools for DifferentialEquations.jl is\n[SparsityDetection.jl](https://github.com/JuliaDiffEq/SparsityDetection.jl).\nThis allows for automatic declaration of Jacobian sparsity types. To see this\nin action, let's look at the 2-dimensional Brusselator equation:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "const N = 32\nconst xyd_brusselator = range(0,stop=1,length=N)\nbrusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5.\nlimit(a, N) = a == N+1 ? 1 : a == 0 ? 
N : a\nfunction brusselator_2d_loop(du, u, p, t)\n A, B, alpha, dx = p\n alpha = alpha/dx^2\n @inbounds for I in CartesianIndices((N, N))\n i, j = Tuple(I)\n x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]]\n ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N)\n du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) +\n B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t)\n du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) +\n A*u[i,j,1] - u[i,j,1]^2*u[i,j,2]\n end\nend\np = (3.4, 1., 10., step(xyd_brusselator))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Given this setup, we can give and example `input` and `output` and call `sparsity!`\non our function with the example arguments and it will kick out a sparse matrix\nwith our pattern, that we can turn into our `jac_prototype`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using SparsityDetection, SparseArrays\ninput = rand(32,32,2)\noutput = similar(input)\nsparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0)\njac_sparsity = Float64.(sparse(sparsity_pattern))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's double check what our sparsity pattern looks like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots\nspy(jac_sparsity,markersize=1,colorbar=false,color=:deep)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That's neat, and would be tedius to build by hand! 
Now we just pass it to the\n`ODEFunction` like as before:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Build the `ODEProblem`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function init_brusselator_2d(xyd)\n N = length(xyd)\n u = zeros(N, N, 2)\n for I in CartesianIndices((N, N))\n x = xyd[I[1]]\n y = xyd[I[2]]\n u[I,1] = 22*(y*(1-y))^(3/2)\n u[I,2] = 27*(x*(1-x))^(3/2)\n end\n u\nend\nu0 = init_brusselator_2d(xyd_brusselator)\nprob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop,\n u0,(0.,11.5),p)\n\nprob_ode_brusselator_2d_sparse = ODEProblem(f,\n u0,(0.,11.5),p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's see how the version with sparsity compares to the version without:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@btime solve(prob_ode_brusselator_2d,save_everystep=false)\n@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Declaring Color Vectors for Fast Construction\n\nIf you cannot directly define a Jacobian function, you can use the `colorvec`\nto speed up the Jacobian construction. What the `colorvec` does is allows for\ncalculating multiple columns of a Jacobian simultaniously by using the sparsity\npattern. An explanation of matrix coloring can be found in the\n[MIT 18.337 Lecture Notes](https://mitmath.github.io/18337/lecture9/stiff_odes).\n\nTo perform general matrix coloring, we can use\n[SparseDiffTools.jl](https://github.com/JuliaDiffEq/SparseDiffTools.jl). 
For\nexample, for the Brusselator equation:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using SparseDiffTools\ncolorvec = matrix_colors(jac_sparsity)\n@show maximum(colorvec)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This means that we can now calculate the Jacobian in 12 function calls. This is\na nice reduction from 2048 using only automated tooling! To now make use of this\ninside of the ODE solver, you simply need to declare the colorvec:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity,\n colorvec=colorvec)\nprob_ode_brusselator_2d_sparse = ODEProblem(f,\n init_brusselator_2d(xyd_brusselator),\n (0.,11.5),p)\n@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice the massive speed enhancement!\n\n## Defining Linear Solver Routines and Jacobian-Free Newton-Krylov\n\nA completely different way to optimize the linear solvers for large sparse\nmatrices is to use a Krylov subpsace method. This requires choosing a linear\nsolver for changing to a Krylov method. Optionally, one can use a Jacobian-free\noperator to reduce the memory requirements.\n\n### Declaring a Jacobian-Free Newton-Krylov Implementation\n\nTo swap the linear solver out, we use the `linsolve` command and choose the\nGMRES linear solver." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)\n@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "For more information on linear solver choices, see the\n[linear solver documentation](http://docs.juliadiffeq.org/latest/features/linear_nonlinear.html).\n\nOn this problem, handling the sparsity correctly seemed to give much more of a\nspeedup than going to a Krylov approach, but that can be dependent on the problem\n(and whether a good preconditioner is found).\n\nWe can also enhance this by using a Jacobian-Free implementation of `f'(x)*v`.\nTo define the Jacobian-Free operator, we can use\n[DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to generate\nan operator `JacVecOperator` such that `Jv*v` performs `f'(x)*v` without building\nthe Jacobian matrix." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqOperators\nJv = JacVecOperator(brusselator_2d_loop,u0,p,0.0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and then we can use this by making it our `jac_prototype`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv)\nprob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p)\n@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Adding a Preconditioner\n\nThe [linear solver documentation](http://docs.juliadiffeq.org/latest/features/linear_nonlinear.html#IterativeSolvers.jl-Based-Methods-1)\nshows how you can add a preconditioner to the GMRES. For example, you can\nuse packages like [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl)\nto add an algebraic multigrid (AMG) or [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl)\nfor an incomplete LU-factorization (iLU)." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using AlgebraicMultigrid\npc = aspreconditioner(ruge_stuben(jac_sparsity))\n@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Using Structured Matrix Types\n\nIf your sparsity pattern follows a specific structure, for example a banded\nmatrix, then you can declare `jac_prototype` to be of that structure and then\nadditional optimizations will come for free. 
Note that in this case, it is\nnot necessary to provide a `colorvec` since the color vector will be analytically\nderived from the structure of the matrix.\n\nThe matrices which are allowed are those which satisfy the\n[ArrayInterface.jl](https://github.com/JuliaDiffEq/ArrayInterface.jl) interface\nfor automatically-colorable matrices. These include:\n\n- Bidiagonal\n- Tridiagonal\n- SymTridiagonal\n- BandedMatrix ([BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl))\n- BlockBandedMatrix ([BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl))\n\nMatrices which do not satisfy this interface can still be used, but the matrix\ncoloring will not be automatic, and an appropriate linear solver may need to\nbe given (otherwise it will default to attempting an LU-decomposition).\n\n## Sundials-Specific Handling\n\nWhile much of the setup makes the transition to using Sundials automatic, there\nare some differences between the pure Julia implementations and the Sundials\nimplementations which must be taken note of. These are all detailed in the\n[Sundials solver documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1),\nbut here we will highlight the main details which one should make note of.\n\nDefining a sparse matrix and a Jacobian for Sundials works just like any other\npackage. The core difference is in the choice of the linear solver. With Sundials,\nthe linear solver choice is done with a Symbol in the `linear_solver` from a\npreset list. Particular choices of note are `:Band` for a banded matrix and\n`:GMRES` for using GMRES. If you are using Sundials, `:GMRES` will not require\ndefining the JacVecOperator, and instead will always make use of a Jacobian-Free\nNewton Krylov (with numerical differentiation). 
Thus on this problem we could do:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Sundials\n# Sparse Version\n@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false)\n# GMRES Version: Doesn't require any extra stuff!\n@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Details for setting up a preconditioner with Sundials can be found at the\n[Sundials solver page](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1).\n\n## Handling Mass Matrices\n\nInstead of just defining an ODE as $u' = f(u,p,t)$, it can be common to express\nthe differential equation in the form with a mass matrix:\n\n$$Mu' = f(u,p,t)$$\n\nwhere $M$ is known as the mass matrix. Let's solve the Robertson equation.\nAt the top we wrote this equation as:\n\n$$\\begin{align}\ndy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\\\\ndy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\\\\ndy_3 &= 3*10^7 y_{3}^2 \\\\\n\\end{align}$$\n\nBut we can instead write this with a conservation relation:\n\n$$\\begin{align}\ndy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\\\\ndy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\\\\n1 &= y_{1} + y_{2} + y_{3} \\\\\n\\end{align}$$\n\nIn this form, we can write this as a mass matrix ODE where $M$ is singular\n(this is another form of a differential-algebraic equation (DAE)). Here, the\nlast row of `M` is just zero. We can implement this form as:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nfunction rober(du,u,p,t)\n y₁,y₂,y₃ = u\n k₁,k₂,k₃ = p\n du[1] = -k₁*y₁+k₃*y₂*y₃\n du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃\n du[3] = y₁ + y₂ + y₃ - 1\n nothing\nend\nM = [1. 0 0\n 0 1. 
0\n 0 0 0]\nf = ODEFunction(rober,mass_matrix=M)\nprob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))\nsol = solve(prob_mm,Rodas5())\n\nplot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that if your mass matrix is singular, i.e. your system is a DAE, then you\nneed to make sure you choose\n[a solver that is compatible with DAEs](http://docs.juliadiffeq.org/latest/solvers/dae_solve.html#Full-List-of-Methods-1)" - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.2.0" - }, - "kernelspec": { - "name": "julia-1.2", - "display_name": "Julia 1.2.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/exercises/01-workshop_exercises.ipynb b/notebook/exercises/01-workshop_exercises.ipynb deleted file mode 100644 index 7d4b686a..00000000 --- a/notebook/exercises/01-workshop_exercises.ipynb +++ /dev/null @@ -1,122 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# DifferentialEquations.jl Workshop Exercises\n### Chris Rackauckas\n\nThese exercises teach common workflows which involve DifferentialEquations.jl.\nThe designation (B) is for \"Beginner\", meaning that a user new to the package\nshould feel comfortable trying this exercise. An exercise designated (I) is\nfor \"Intermediate\", meaning the user may want to have some previous background\nin DifferentialEquations.jl or try some (B) exercises first. 
The additional\n(E) designation is for \"Experienced\", which are portions of exercises which may\ntake some work.\n\nThe exercises are described as follows:\n\n- Exercise 1 takes the user through solving a stiff ordinary differential equation\n and using the ModelingToolkit.jl to automatically convert the function to a\n symbolic form to derive the analytical Jacobian to speed up the solver. The\n same biological system is then solved with stochasticity, utilizing\n EnsembleProblems to understand 95% bounds on the solution. Finally,\n probabilistic programming is employed to perform Bayesian parameter estimation\n of the parameters against data.\n- Exercise 2 takes the user through defining hybrid delay differential equation,\n that is a differential equation with events, and using differentiable programming\n techniques (automatic differentiation) to to perform gradient-based parameter\n estimation.\n- Exercise 3 takes the user through differential-algebraic equation (DAE)\n modeling, the concept of index, and using both mass-matrix and implicit\n ODE representations. This will require doing a bit of math, but the student\n will understand how to change their equations to make their DAE numerically\n easier for the integrators. 
\n- Exercise 4 takes the user through optimizing a PDE solver, utilizing\n automatic sparsity pattern recognition, automatic conversion of numerical\n codes to symbolic codes for analytical construction of the Jacobian,\n preconditioned GMRES, and setting up a solver for IMEX and GPUs, and compute\n adjoints of PDEs.\n- Exercise 5 focuses on a chaotic orbit, utilizing parallel ensembles across\n supercomputers and GPUs to quickly describe phase space.\n- Exercise 6 takes the user through training a neural stochastic differential\n equation, using GPU-accleration and adjoints through Flux.jl's neural\n network framework to build efficient training codes.\n\nThis exercise worksheet is meant to be a living document leading new users through\na deep dive of the DifferentialEquations.jl feature set. If you further suggestions\nor want to contribute new problems, please open an issue or PR at the\nDiffEqTutorials.jl repository.\n\n# Problem 1: Investigating Sources of Randomness and Uncertainty in a Stiff Biological System (B)\n\nIn this problem we will walk through the basics of simulating models with\nDifferentialEquations.jl. Let's take the\n[Oregonator model of the Belousov-Zhabotinskii chemical reaction system](https://www.radford.edu/~thompson/vodef90web/problems/demosnodislin/Demos_Pitagora/DemoOrego/demoorego.pdf).\nThis system describes a classical example in non-equilibrium thermodynmics\nand is a well-known natural chemical oscillator.\n\n## Part 1: Simulating the Oregonator ODE model\n\nWhen modeling, usually one starts off by investigating the deterministic model.\nThe deterministic ODE formulation of the Oregonator is\ngiven by the equations\n\n$$\\begin{align}\n\\frac{dx}{dt} &= s(y-xy + x - qx^2)\\\\\n\\frac{dy}{dt} &= (-y - xy + z)/s\\\\\n\\frac{dz}{dt} &= w(x - z)\\end{align}$$\n\nwith parameter values $s=77.27$, $w=0.161$, and $q=8.375 \\times 10^{-6}$, and\ninitial conditions $x(0)=1$, $y(0)=2$, and $z(0)=3$. 
Use\n[the tutorial on solving ODEs](http://docs.juliadiffeq.org/latest/tutorials/ode_example.html)\nto solve this differential equation on the\ntimespan of $t\\in[0,360]$ with the default ODE solver. To investigate the result,\nplot the solution of all components over time, and plot the phase space plot of\nthe solution (hint: use `vars=(1,2,3)`). What shape is being drawn in phase space?\n\n## Part 2: Investigating Stiffness\n\nBecause the reaction rates of `q` vs `s` is very large, this model has a \"fast\"\nsystem and a \"slow\" system. This is typical of ODEs which exhibit a property\nknown as stiffness. Stiffness changes the ODE solvers which can handle the\nequation well. [Take a look at the ODE solver page](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html)\nand investigate solving the equation using methods for non-stiff equations\n(ex: `Tsit5`) and stiff equations (ex: `Rodas5`).\n\nBenchmark using $t\\in[0,50]$ using `@btime` from BenchmarkTools.jl. What\nhappens when you increase the timespan?\n\n## (Optional) Part 3: Specifying Analytical Jacobians (I)\n\nStiff ODE solvers internally utilize the Jacobian of the ODE system in order\nto improve the stepsizes in the solution. However, computing and factorizing\nthe Jacobian is costly, and thus it can be beneficial to provide the analytical\nsolution.\n\nUse the\n[ODEFunction definition page](http://docs.juliadiffeq.org/latest/features/performance_overloads.html)\nto define an `ODEFunction` which holds both the OREGO ODE and its Jacobian, and solve using `Rodas5`.\n\n## (Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations\n\nDeriving Jacobians by hand is tedious. Thankfully symbolic mathematical systems\ncan do the work for you. 
And thankfully, DifferentialEquations.jl has tools\nto automatically convert numerical problems into symbolic problems to perform\nthe analysis on!\n\nfollow the [ModelingToolkit.jl README](https://github.com/JuliaDiffEq/ModelingToolkit.jl)\nto automatically convert your ODE definition\nto its symbolic form using `modelingtoolkitize` and calculate the analytical\nJacobian. Use the compilation functions to build the `ODEFunction` with the\nembedded analytical solution.\n\n## Part 5: Adding stochasticity with stochastic differential equations\n\nHow does this system react in the presense of stochasticity? We can investigate\nthis question by using stochastic differential equations. A stochastic\ndifferential equation formulation of this model is known as the multiplicative\nnoise model, is created with:\n\n$$\\begin{align}\ndx &= s(y-xy + x - qx^2)dt + \\sigma_1 x dW_1\\\\\ndy &= \\frac{-y - xy + z}{s}dt + \\sigma_2 y dW_2\\\\\ndz &= w(x - z)dt + \\sigma_3 z dW_3\\end{align}$$\n\nwith $\\sigma_i = 0.1$ where the `dW` terms describe a Brownian motion, a\ncontinuous random process with normally distributed increments. Use the\n[tutorial on solving SDEs](http://docs.juliadiffeq.org/latest/tutorials/sde_example.html)\nto solve simulate this model. Then,\n[use the `EnsembleProblem`](http://docs.juliadiffeq.org/latest/features/ensemble.html)\nto generate and plot 100 trajectories of the stochastic model, and use\n`EnsembleSummary` to plot the mean and 5%-95% region over time.\n\nTry solving with the `ImplicitRKMil` and `SOSRI` methods. Notice that it isn't\nstiff every single time!\n\n(For fun, see if you can make the Euler-Maruyama `EM()` method solve this equation.\nThis requires a choice of `dt` small enough to be stable. 
This is the \"standard\"\nmethod!)\n\n## Part 6: Gillespie jump models of discrete stochasticity\n\nWhen biological models have very few particles, continuous models no longer\nmake sense, and instead using the full discrete formulation can be required\nto accuracy describe the dynamics. A discrete differential equation, or\nGillespie model, is a continuous-time Markov chain with Poisson-distributed\njumps. A discrete description of the Oregonator model is given by a chemical\nreaction systems:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A+Y -> X+P\nX+Y -> 2P\nA+X -> 2X + 2Z\n2X -> A + P (note: this has rate kX^2!)\nB + Z -> Y" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "where reactions take place at a rate which is propoertional to its components,\ni.e. the first reaction has a rate `k*A*Y` for some `k`.\nUse the [tutorial on Gillespie SSA models](http://docs.juliadiffeq.org/latest/tutorials/discrete_stochastic_example.html)\nto implement the `JumpProblem` for this model, and use the `EnsembleProblem`\nand `EnsembleSummary` to characterize the stochastic trajectories.\n\nFor what rate constants does the model give the oscillatory dynamics for the\nODE approximation? For information on the true reaction rates, consult\n[the original paper](https://pubs.acs.org/doi/abs/10.1021/ja00780a001).\n\n## Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I)\n\nIn many casees, one comes to understand the proper values for their model's\nparameters by utilizing data fitting techniques. 
In this case, we will use\nthe DiffEqBayes.jl library to perform a Bayesian estimation of the parameters.\nFor our data we will the following potential output:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t = 0.0:1.0:30.0\ndata = [1.0 2.05224 2.11422 2.1857 2.26827 2.3641 2.47618 2.60869 2.7677 2.96232 3.20711 3.52709 3.97005 4.64319 5.86202 9.29322 536.068 82388.9 57868.4 1.00399 1.00169 1.00117 1.00094 1.00082 1.00075 1.0007 1.00068 1.00066 1.00065 1.00065 1.00065\n 2.0 1.9494 1.89645 1.84227 1.78727 1.73178 1.67601 1.62008 1.56402 1.50772 1.45094 1.39322 1.33366 1.2705 1.19958 1.10651 0.57194 0.180316 0.431409 251.774 591.754 857.464 1062.78 1219.05 1335.56 1419.88 1478.22 1515.63 1536.25 1543.45 1539.98\n 3.0 2.82065 2.68703 2.58974 2.52405 2.48644 2.47449 2.48686 2.52337 2.58526 2.67563 2.80053 2.9713 3.21051 3.5712 4.23706 12.0266 14868.8 24987.8 23453.4 19202.2 15721.6 12872.0 10538.8 8628.66 7064.73 5784.29 4735.96 3877.66 3174.94 2599.6]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "[Follow the exmaples on the parameter estimation page](http://docs.juliadiffeq.org/latest/analysis/parameter_estimation.html#Bayesian-Methods-1)\nto perform a Bayesian parameter estimation. What are the most likely parameters\nfor the model given the posterior parameter distributions?\n\nUse the `ODEProblem` to perform the fit. If you have time, use the `EnsembleProblem`\nof `SDEProblem`s to perform a fit over averages of the SDE solutions. Note that\nthe SDE fit will take significantly more computational resources! See the GPU\nparallelism section for details on how to accelerate this.\n\n## (Optional) Part 8: Using DiffEqBiological's Reaction Network DSL\n\nDiffEqBiological.jl is a helper library for the DifferentialEquations.jl\necosystem for defining chemical reaction systems at a high leevel for easy\nsimulation in these various forms. 
Use the descrption\n[from the Chemical Reaction Networks documentation page](http://docs.juliadiffeq.org/latest/models/biological.html)\nto build a reaction network and generate the ODE/SDE/jump equations, and\ncompare the result to your handcoded versions.\n\n# Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B)\n\nHybrid differential equations are differential equations with events, where\nevents are some interaction that occurs according to a prespecified condition.\nFor example, the bouncing ball is a classic hybrid differential equation given\nby an ODE (Newton's Law of Gravity) mixed with the fact that, whenever the\nball hits the floor (`x=0`), then the velocity of the ball flips (`v=-v`).\n\nIn addition, many models incorporate delays, that is the driving force of the\nequation is dependent not on the current values, but values from the past.\nThese delay differential equations model how individuals in the economy act\non old information, or that biological processes take time to adapt to a new\nenvironment.\n\nIn this equation we will build a hybrid delayed pharmacokinetic model and\nuse the parameter estimation techniques to fit this it to a data.\n\n## Part 1: Defining an ODE with Predetermined Doses\n\nFirst, let's define the simplest hybrid ordinary differential equation: an ODE\nwhere the events take place at fixed times. The ODE we will use is known as\nthe one-compartment model:\n\n$$\\begin{align}\n\\frac{d[Depot]}{dt} &= -K_a [Depot] + R\\\\\n\\frac{d[Central]}{dt} &= K_a [Depot] - K_e [Central]\\end{align}$$\n\nwith $t \\in [0,90]$, $u_0 = [100.0,0]$, and $p=[K_a,K_e]=[2.268,0.07398]$.\n\nWith this model, use [the event handling documentation page](http://docs.juliadiffeq.org/latest/features/callback_functions.html)\nto define a `DiscreteCallback` which fires at `t ∈ [24,48,72]` and adds a\ndose of 100 into `[Depot]`. 
(Hint: you'll want to set `tstops=[24,48,72]` to\nforce the ODE solver to step at these times).\n\n## Part 2: Adding Delays\n\nNow let's assume that instead of there being one compartment, there are many\ntransit compartment that the drug must move through in order to reach the\ncentral compartment. This effectively delays the effect of the transition from\n`[Depot]` to `[Central]`. To model this effect, we will use the delay\ndifferential equation which utilizes a fixed time delay $\\tau$:\n\n$$\\begin{align}\n\\frac{d[Depot]}{dt} &= -K_a [Depot](t)\\\\\n\\frac{d[Central]}{dt} &= K_a [Depot](t-\\tau) - K_e [Central]\\end{align}$$\n\nwhere the parameter $τ = 6.0$.\n[Use the DDE tutorial](http://docs.juliadiffeq.org/latest/tutorials/dde_example.html)\nto define and solve this delayed version of the hybrid model.\n\n## Part 3: Automatic Differentiation (AD) for Optimization (I)\n\nIn order to fit parameters $(K_a,K_e,\\tau)$ we will want to be able to calculate\nthe gradient of the solution with respect to the initial conditions. One way to\ndo this is via Automatic Differentition (AD). For small numbers of parameters\n(<100), it is fastest to use Forward-Mode Automatic Differentition\n(even faster than using adjoint sensitivity analysis!). Thus for this problem\nwe will make use of ForwardDiff.jl to use Dual number arithmetic to retrive\nboth the solution and its derivative w.r.t. parameters in a single solve.\n\n[Use the information from the page on local sensitvity analysis](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html)\nto define the input dual numbers, solve the equation, and plot both the solution\nover time and the derivative of the solution w.r.t. the parameters.\n\n## Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl\n\nNow let's fit the delayed model to a dataset. 
For the data, use the array" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t = 0.0:12.0:90.0\ndata = [100.0 0.246196 0.000597933 0.24547 0.000596251 0.245275 0.000595453 0.245511\n 0.0 53.7939 16.8784 58.7789 18.3777 59.1879 18.5003 59.2611]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Use [the parameter estimation page](http://docs.juliadiffeq.org/latest/analysis/parameter_estimation.html)\nto define a loss function with `build_loss_objective` and optimize the parameters\nagainst the data. What parameters were used to generate the data?\n\n## Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I)\n\nNow that we have fit our delay differential equation model to the dataset, we\nwant to start testing out automated treatment strategies. Let's assume that\ninstead of giving doses at fixed time points, we invent a wearable which\nmonitors the patient and administers a dose whenever the internal drug\nconcentration falls below 25. To model this effect, we will need to use\n`ContinuousCallbacks` to define a callback that triggers when `[Central]` falls\nbelow the threshold value.\n\n[Use the documentation on the event handling page](http://docs.juliadiffeq.org/latest/features/callback_functions.html) to define such a callback,\nand plot the solution over time. How many times does the auto-doser administer\na dose? How much does this change as you change the delay time $\\tau$?\n\n## Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods\n\nTo understand how the parameters effect the solution in a global sense, one\nwants to use Global Sensitivity Analysis. 
Use the\n[GSA documentation page](http://docs.juliadiffeq.org/latest/analysis/global_sensitivity.html)\nperform global sensitivity analysis and quantify the effect of the various\nparameters on the solution.\n\n# Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B)\n\nDifferential-Algebraic Equaton (DAE) systems are like ODEs but allow for adding\nconstraints into the models. This problem will look at solving the double\npenulum problem with enforcement of the rigid body constraints, requiring that\nthe total distance `L` is constant throughout the simulation. While these\nequations can be rewritten in an ODE form, in many cases it can be simpler\nto solve the equation directly with the constraints. This tutorial will\ncover both the idea of index, how to manually perform index reduction,\nand how to make use of mass matrix and implicit ODE solvers to handle these\nproblems.\n\n## Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations\n\nA mass-matrix ordinary differential equation (ODE) is an ODE where the\nleft-hand side, the derivative side, is multiplied by a matrix known as the\nmass matrix. This is described as:\n\n$$Mu' = f(u,p,t)$$\n\nwhere $M$ is the mass matrix. When $M$ is invertible, there is an ODE which is\nequivalent to this formulation. When $M$ is not invertible, this can have a\ndistinctly different behavior and is as Differential-Algebraic Equation (DAE).\n\nSolve the Robertson DAE:\n\n$$\\begin{align}\n\\frac{dy_1}{dt} &= -0.04y_1 + 10^4 y_2y_3\\\\\n\\frac{dy_2}{dt} &= 0.04y_1 - 10^4 y_2y_3 - 3\\times 10^7 y_2^2\\\\\n1 &= y_1 + y_2 + y_3\\end{align}$$\n\nwith $y(0) = [1,0,0]$ and $dy(0) = [-0.04,0.04,0.0]$ using the mass-matrix\nformulation and `Rodas5()`. 
Use the\n[ODEProblem page](http://docs.juliadiffeq.org/latest/types/ode_types.html)\nto find out how to declare a mass matrix.\n\n(Hint: what if the last row has all zeros?)\n\n## Part 2: Solving the Implicit Robertson Equations with IDA\n\nUse the [DAE Tutorial](http://docs.juliadiffeq.org/latest/tutorials/dae_example.html)\nto define a DAE in its implicit form and solve the Robertson equation with IDA.\nWhy is `differential_vars = [true,true,false]`?\n\n## Part 3: Manual Index Reduction of the Single Pendulum\n\n## Part 4: Single Pendulum Solution with IDA\n\n## Part 5: Solving the Double Penulum DAE System\n\n# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I)\n\nThis problem will focus on implementing and optimizing the solution of the\n2-dimensional Brusselator equations. The BRUSS equations are a well-known\nhighly stiff oscillatory system of partial differential equations which are\nused in stiff ODE solver benchmarks. In this tutorial we will walk first\nthrough a simple implmentation, then do allocation-free implementations and\nlooking deep into solver options and benchmarking.\n\n## Part 1: Implementing the BRUSS PDE System as ODEs\n\nThe Brusselator PDE is defined as follows:\n\n$$\\begin{align}\n\\frac{\\partial u}{\\partial t} &= 1 + u^2v - 4.4u + \\alpha(\\frac{\\partial^2 u}{\\partial x^2} + \\frac{\\partial^2 u}{\\partial y^2}) + f(x, y, t)\\\\\n\\frac{\\partial v}{\\partial t} &= 3.4u - u^2v + \\alpha(\\frac{\\partial^2 u}{\\partial x^2} + \\frac{\\partial^2 u}{\\partial y^2})\\end{align}$$\n\nwhere\n\n$$f(x, y, t) = \\begin{cases}\n5 & \\quad \\text{if } (x-0.3)^2+(y-0.6)^2 ≤ 0.1^2 \\text{ and } t ≥ 1.1 \\\\\n0 & \\quad \\text{else}\\end{cases}$$\n\nand the initial conditions are\n\n$$\\begin{align}\nu(x, y, 0) &= 22\\cdot y(1-y)^{3/2} \\\\\nv(x, y, 0) &= 27\\cdot x(1-x)^{3/2}\\end{align}$$\n\nwith the periodic boundary condition\n\n$$\\begin{align}\nu(x+1,y,t) &= u(x,y,t) \\\\\nu(x,y+1,t) &= 
u(x,y,t)\\end{align}$$\n\non a timespan of $t \\in [0,22]$.\n\nTo solve this PDE, we will discretize it into a system of ODEs with the finite\ndifference method. We discretize `u` and `v` into arrays of the values at each\ntime point: `u[i,j] = u(i*dx,j*dy)` for some choice of `dx`/`dy`, and same for\n`v`. Then our ODE is defined with `U[i,j,k] = [u v]`. The second derivative\noperator, the Laplacian, discretizes to become the `Tridiagonal` matrix with\n`[1 -2 1]` and a `1` in the top left and right corners. The nonlinear functions\nare then applied at each point in space (they are broadcast). Use `dx=dy=1/32`.\n\nYou will know when you have the correct solution when you plot the solution\nat `x=0.25` and see a periodic orbit.\n\nIf you are not familiar with this process, see\n[the Gierer-Meinhardt example from the DiffEqTutorials.](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/03-optimizing_diffeq_code.html)\n\nNote: Start by doing the simplest implementation!\n\n## Part 2: Optimizing the BRUSS Code\n\nPDEs are expensive to solve, and so we will go nowhere without some code\noptimizing! Follow the steps described in the\n[the Gierer-Meinhardt example from the DiffEqTutorials](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/03-optimizing_diffeq_code.html)\nto optimize your Brusselator code. Try other formulations and see what ends\nup the fastest! Find a trade-off between performance and simplicity that suits\nyour needs.\n\n## Part 3: Exploiting Jacobian Sparsity with Color Differentiation\n\nUse the `sparsity!` function from [SparseDiffTools](https://github.com/JuliaDiffEq/SparseDiffTools.jl)\nto generate the sparsity pattern for the Jacobian of this problem. Follow\nthe documentations [on the DiffEqFunction page](http://docs.juliadiffeq.org/latest/features/performance_overloads.html)\nto specify the sparsity pattern of the Jacobian. 
Generate an add the color\nvector to speed up the computation of the Jacobian.\n\n## (Optional) Part 4: Structured Jacobians\n\nSpecify the sparsity pattern using a BlockBandedMatrix from\n[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)\nto accelerate the previous sparsity handling tricks.\n\n## (Optional) Part 5: Automatic Symbolicification and Analytical Jacobian\n\nUse the `modelingtoolkitize` function from ModelingToolkit.jl to convert your\nnumerical ODE function into a symbolic ODE function and use that to compute and\nsolve with an analytical sparse Jacobian.\n\n## Part 6: Utilizing Preconditioned-GMRES Linear Solvers\n\nUse the [linear solver specification page](http://docs.juliadiffeq.org/latest/features/linear_nonlinear.html)\nto solve the equation with `TRBDF2` with GMRES. Use the Sundials documentation\nto solve the equation with `CVODE_BDF` with Sundials' special internal GMRES.\nTo both of these, use the [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl)\nto add a preconditioner to the GMRES solver.\n\n## Part 7: Exploring IMEX and Exponential Integrator Techniques (E)\n\nInstead of using the standard `ODEProblem`, define a [`SplitODEProblem`](http://docs.juliadiffeq.org/latest/types/split_ode_types.html)\nto move some of the equation to the the \"non-stiff part\". Try different splits\nand solve with `KenCarp4` to see if the solution can be accelerated.\n\nNext, use `DiffEqArrayOperator` to define part of the equation as linear, and\nuse the `ETDRK4` exponential integrator to solve the equation. 
Note that this\ntechnique is not appropriate for this equation since it relies on the\nnonlinear term being non-stiff for best results.\n\n## Part 8: Work-Precision Diagrams for Benchmarking Solver Choices\n\nUse the `WorkPrecisionSet` method from\n[DiffEqDevTools.jl](https://github.com/JuliaDiffEq/DiffEqDevTools.jl) to\nbenchmark multiple different solver methods and find out what combination is\nmost efficient.\n[Take a look at DiffEqBenchmarks.jl](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl)\nfor usage examples.\n\n## Part 9: GPU-Parallelism for PDEs (E)\n\nFully vectorize your implementation of the ODE and use a `CuArray` from\n[CuArrays.jl](https://github.com/JuliaGPU/CuArrays.jl) as the initial condition\nto cause the whole solution to be GPU accelerated.\n\n## Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs\n\nIn order to optimize the parameters of a PDE, you need to be able to compute\nthe gradient of the solution with respect to the parameters. This is done\nthrough sensitivity analysis. For PDEs, generally the system is at a scale\nwhere forward sensitivity analysis (forward-mode automatic differentiation)\nis no longer suitable, and for these cases one uses adjoint sensitivity analysis.\n\nRewrite the PDE so the constant terms are parameters, and use the\n[adjoint sensitivity analysis](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html#Adjoint-Sensitivity-Analysis-1)\ndocumentation to solve for the solution gradient with a cost function being the\nL2 distance of the solution from the value 1. Solve with interpolated and\ncheckpointed adjoints. Play with using reverse-mode automatic differentiation\nvs direct computation of vector-Jacobian products using the `autojacvec` option\nof the `SensitivityAlg`. 
Find the set of options most suitable for this PDE.\n\nIf you have compute time, use this adjoint to optimize the parameters of the\nPDE with respect to this cost function.\n\n# Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B)\n\nIn this example we will investigate how the parameters \"generally\" effect the\nsolution in the chaotic Henon-Heiles system. By \"generally\" we will use global\nsensitivity analysis methods to get an average global characterization of the\nparameters on the solution. In addition to a global sensitivity approach, we\nwill generate large ensembles of solutions with different parameters using\na GPU-based parallelism approach.\n\n## Part 1: Implementing the Henon-Heiles System (B)\n\nThe Henon-Heiles Hamiltonian system is described by the ODEs:\n\n$$\\begin{align}\n\\frac{dp_1}{dt} &= -q_1 (1 + 2q_2)\\\\\n\\frac{dp_2}{dt} &= -q_2 - (q_1^2 - q_2^2)\\\\\n\\frac{dq_1}{dt} &= p_1\\\\\n\\frac{dq_2}{dt} &= p_2\\end{align}$$\n\nwith initial conditions $u_0 = [0.1,0.0,0.0,0.5]$.\nSolve this system over the timespan $t\\in[0,1000]$\n\n## (Optional) Part 2: Alternative Dynamical Implmentations of Henon-Heiles (B)\n\nThe Henon-Heiles defines a Hamiltonian system with certain structures which\ncan be utilized for a more efficient solution. 
Use [the Dynamical problems page](http://docs.juliadiffeq.org/latest/types/dynamical_types.html)\nto define a `SecondOrderODEProblem` corresponding to the acceleration terms:\n\n$$\\begin{align}\n\\frac{dp_1^2}{dt} &= -q_1 (1 + 2q_2)\\\\\n\\frac{dp_2^2}{dt} &= -q_2 - (q_1^2 - q_2^2)\\end{align}$$\n\nSolve this with a method that is specific to dynamical problems, like `DPRKN6`.\n\nThe Hamiltonian can also be directly described:\n\n$$H(p,q) = \\frac{1}{2}(p_1^2 + p_2^2) + \\frac{1}{2}(q_1^2+q_2^2+2q_1^2 q_2 - \\frac{2}{3}q_2^3)$$\n\nSolve this problem using the `HamiltonianProblem` constructor from DiffEqPhysics.jl.\n\n## Part 3: Parallelized Ensemble Solving\n\nTo understand the orbits of the Henon-Heiles system, it can be useful to solve\nthe system with many different initial conditions. Use the\n[ensemble interface](http://docs.juliadiffeq.org/latest/features/ensemble.html)\nto solve with randomized initial conditions in parallel using threads with\n`EnsembleThreads()`. Then, use `addprocs()` to add more cores and solve using\n`EnsembleDistributed()`. The former will solve using all of the cores on a\nsingle computer, while the latter will use all of the cores on which there\nare processors, which can include thousands across a supercomputer! See\n[Julia's parallel computing setup page](https://docs.julialang.org/en/v1/manual/parallel-computing/index.html)\nfor more details on the setup.\n\n## Part 4: Parallelized GPU Ensemble Solving\n\nSetup the CUDAnative.jl library and use the `EnsembleGPUArray()` method to\nparallelize the solution across the thousands of cores of a GPU. Note that\nthis will efficiency solve for hundreds of thousands of trajectores.\n\n# Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I)\n\nIn the previous models we had to define a model. Now let's shift the burden of\nmodel-proofing onto data by utilizing neural differential equations. 
A neural\ndifferential equation is a differential equation where the model equations\nare replaced, either in full or in part, by a neural network. For example, a\nneural ordinary differential equation is an equation $u^\\prime = f(u,p,t)$\nwhere $f$ is a neural network. We can learn this neural network from data using\nvarious methods, the easiest of which is known as the single shooting method,\nwhere one chooses neural network parameters, solves the equation, and checks\nthe ODE's solution against data as a loss.\n\nIn this example we will define and train various forms of neural differential\nequations. Note that all of the differential equation types are compatible with\nneural differential equations, so this is only going to scratch the surface of\nthe possibilites!\n\n## Part 1: Constructing and Training a Basic Neural ODE\n\nUse the [DiffEqFlux.jl README](https://github.com/JuliaDiffEq/DiffEqFlux.jl) to\nconstruct a neural ODE to train against the training data:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = Float32[2.; 0.]\ndatasize = 30\ntspan = (0.0f0,1.5f0)\n\nfunction trueODEfunc(du,u,p,t)\n true_A = [-0.1 2.0; -2.0 -0.1]\n du .= ((u.^3)'true_A)'\nend\nt = range(tspan[1],tspan[2],length=datasize)\nprob = ODEProblem(trueODEfunc,u0,tspan)\node_data = Array(solve(prob,Tsit5(),saveat=t))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Part 2: GPU-accelerating the Neural ODE Process\n\nUse the `gpu` function from Flux.jl to transform all of the calculations onto\nthe GPU and train the neural ODE using GPU-accelerated `Tsit5` with adjoints.\n\n## Part 3: Defining and Training a Mixed Neural ODE\n\nGather data from the Lotka-Volterra equation:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lotka_volterra(du,u,p,t)\n x, y = u\n α, β, δ, γ = p\n du[1] = dx = α*x - β*x*y\n du[2] = dy = -δ*y + γ*x*y\nend\nu0 = 
[1.0,1.0]\ntspan = (0.0,10.0)\np = [1.5,1.0,3.0,1.0]\nprob = ODEProblem(lotka_volterra,u0,tspan,p)\nsol = Array(solve(prob,Tsit5())(0.0:1.0:10.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now use the\n[mixed neural section of the documentation](https://github.com/JuliaDiffEq/DiffEqFlux.jl#mixed-neural-des)\nto define the mixed neural ODE where the functional form of $\\frac{dx}{dt}$ is\nknown, and try to derive a neural formulation for $\\frac{dy}{dt}$ directly from\nthe data.\n\n## Part 4: Constructing a Basic Neural SDE\n\nGenerate data from the Lotka-Volterra equation with multiplicative noise" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lotka_volterra(du,u,p,t)\n x, y = u\n α, β, δ, γ = p\n du[1] = dx = α*x - β*x*y\n du[2] = dy = -δ*y + γ*x*y\nend\nfunction lv_noise(du,u,p,t)\n du[1] = p[5]*u[1]\n du[2] = p[6]*u[2]\nend\nu0 = [1.0,1.0]\ntspan = (0.0,10.0)\np = [1.5,1.0,3.0,1.0,0.1,0.1]\nprob = SDEProblem(lotka_volterra,lv_noise,u0,tspan,p)\nsol = [Array(solve(prob,SOSRI())(0.0:1.0:10.0)) for i in 1:20] # 20 solution samples" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Train a neural stochastic differential equation $dX = f(X)dt + g(X)dW_t$ where\nboth the drift ($f$) and the diffusion ($g$) functions are neural networks.\nSee if constraining $g$ can make the problem easier to fit.\n\n## Part 5: Optimizing the training behavior with minibatching (E)\n\nUse minibatching on the data to improve the training procedure. An example\n[can be found at this PR](https://github.com/FluxML/model-zoo/pull/88)." 
- ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/exercises/02-workshop_solutions.ipynb b/notebook/exercises/02-workshop_solutions.ipynb deleted file mode 100644 index 27eff58c..00000000 --- a/notebook/exercises/02-workshop_solutions.ipynb +++ /dev/null @@ -1,174 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# DifferentialEquations.jl Workshop Exercise Solutions\n### Chris Rackauckas\n\n# Problem 1: Investigating Sources of Randomness and Uncertainty in a Biological System\n\n## Part 1: Simulating the Oregonator ODE model" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Plots\nfunction orego(du,u,p,t)\n s,q,w = p\n y1,y2,y3 = u\n du[1] = s*(y2+y1*(1-q*y1-y2))\n du[2] = (y3-(1+y1)*y2)/s\n du[3] = w*(y1-y3)\nend\np = [77.27,8.375e-6,0.161]\nprob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,360.0),p)\nsol = solve(prob)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Part 2: Investigating Stiffness" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using BenchmarkTools\nprob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,50.0),p)\n@btime sol = solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@btime sol = solve(prob,Rodas5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## (Optional) Part 3: Specifying Analytical 
Jacobians (I)\n\n## (Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations\n\n## Part 5: Adding stochasticity with stochastic differential equations" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function orego(du,u,p,t)\n s,q,w = p\n y1,y2,y3 = u\n du[1] = s*(y2+y1*(1-q*y1-y2))\n du[2] = (y3-(1+y1)*y2)/s\n du[3] = w*(y1-y3)\nend\nfunction g(du,u,p,t)\n du[1] = 0.1u[1]\n du[2] = 0.1u[2]\n du[3] = 0.1u[3]\nend\np = [77.27,8.375e-6,0.161]\nprob = SDEProblem(orego,g,[1.0,2.0,3.0],(0.0,30.0),p)\nsol = solve(prob,SOSRI())\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,ImplicitRKMil()); plot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,ImplicitRKMil()); plot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Part 6: Gillespie jump models of discrete stochasticity\n\n## Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I)\n\nThe data was generated with:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function orego(du,u,p,t)\n s,q,w = p\n y1,y2,y3 = u\n du[1] = s*(y2+y1*(1-q*y1-y2))\n du[2] = (y3-(1+y1)*y2)/s\n du[3] = w*(y1-y3)\nend\np = [60.0,1e-5,0.2]\nprob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,30.0),p)\nsol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## (Optional) Part 8: Using DiffEqBiological's Reaction Network DSL\n\n# Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B)\n\n## Part 1: Defining an ODE with Predetermined Doses" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function 
onecompartment(du,u,p,t)\n Ka,Ke = p\n du[1] = -Ka*u[1]\n du[2] = Ka*u[1] - Ke*u[2]\nend\np = (Ka=2.268,Ke=0.07398)\nprob = ODEProblem(onecompartment,[100.0,0.0],(0.0,90.0),p)\n\ntstops = [24,48,72]\ncondition(u,t,integrator) = t ∈ tstops\naffect!(integrator) = (integrator.u[1] += 100)\ncb = DiscreteCallback(condition,affect!)\nsol = solve(prob,Tsit5(),callback=cb,tstops=tstops)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Part 2: Adding Delays" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function onecompartment_delay(du,u,h,p,t)\n Ka,Ke,τ = p\n delayed_depot = h(p,t-τ)[1]\n du[1] = -Ka*u[1]\n du[2] = Ka*delayed_depot - Ke*u[2]\nend\np = (Ka=2.268,Ke=0.07398,τ=6.0)\nh(p,t) = [0.0,0.0]\nprob = DDEProblem(onecompartment_delay,[100.0,0.0],h,(0.0,90.0),p)\n\ntstops = [24,48,72]\ncondition(u,t,integrator) = t ∈ tstops\naffect!(integrator) = (integrator.u[1] += 100)\ncb = DiscreteCallback(condition,affect!)\nsol = solve(prob,MethodOfSteps(Rosenbrock23()),callback=cb,tstops=tstops)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Part 3: Automatic Differentiation (AD) for Optimization (I)\n\n## Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl\n\nThe data was generated with" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = (Ka = 0.5, Ke = 0.1, τ = 4.0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I)\n\n## Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods\n\n# Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B)\n\n## Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations\n\n## Part 2: Solving the Implicit Robertson Equations with IDA\n\n## Part 3: 
Manual Index Reduction of the Single Pendulum\n\n## Part 4: Single Pendulum Solution with IDA\n\n## Part 5: Solving the Double Penulum DAE System\n\n# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I)\n\n## Part 1: Implementing the BRUSS PDE System as ODEs\n\n## Part 2: Optimizing the BRUSS Code\n\n## Part 3: Exploiting Jacobian Sparsity with Color Differentiation\n\n## (Optional) Part 4: Structured Jacobians\n\n## (Optional) Part 5: Automatic Symbolicification and Analytical Jacobian\n\n## Part 6: Utilizing Preconditioned-GMRES Linear Solvers\n\n## Part 7: Exploring IMEX and Exponential Integrator Techniques (E)\n\n## Part 8: Work-Precision Diagrams for Benchmarking Solver Choices\n\n## Part 9: GPU-Parallelism for PDEs (E)\n\n## Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs\n\n# Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B)\n\n## Part 1: Implementing the Henon-Heiles System (B)\n\n## (Optional) Part 2: Alternative Dynamical Implmentations of Henon-Heiles (B)\n\n## Part 3: Parallelized Ensemble Solving\n\n## Part 4: Parallelized GPU Ensemble Solving\n\n# Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I)\n\n## Part 1: Constructing and Training a Basic Neural ODE\n\n## Part 2: GPU-accelerating the Neural ODE Process\n\n## Part 3: Defining and Training a Mixed Neural ODE\n\n## Part 4: Constructing a Basic Neural SDE\n\n## Part 5: Optimizing the training behavior with minibatching (E)" - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/01-ode_introduction.ipynb b/notebook/introduction/01-ode_introduction.ipynb deleted file mode 100644 index 
413180c3..00000000 --- a/notebook/introduction/01-ode_introduction.ipynb +++ /dev/null @@ -1,716 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# An Intro to DifferentialEquations.jl\n### Chris Rackauckas\n\n## Basic Introduction Via Ordinary Differential Equations\n\nThis notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the [ODE tutorial](http://docs.juliadiffeq.org/latest/tutorials/ode_example.html). While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned.\n\n### Background\n\nIf you are new to the study of differential equations, it can be helpful to do a quick background read on [the definition of ordinary differential equations](https://en.wikipedia.org/wiki/Ordinary_differential_equation). We define an ordinary differential equation as an equation which describes the way that a variable $u$ changes, that is\n\n$$u' = f(u,p,t)$$\n\nwhere $p$ are the parameters of the model, $t$ is the time variable, and $f$ is the nonlinear model of how $u$ changes. The initial value problem also includes the information about the starting value:\n\n$$u(t_0) = u_0$$\n\nTogether, if you know the starting value and you know how the value will change with time, then you know what the value will be at any time point in the future. This is the intuitive definition of a differential equation.\n\n### First Model: Exponential Growth\n\nOur first model will be the canonical exponential growth model. This model says that the rate of change is proportional to the current value, and is this:\n\n$$u' = au$$\n\nwhere we have a starting value $u(0)=u_0$. 
Let's say we put 1 dollar into Bitcoin which is increasing at a rate of $98\\%$ per year. Then calling now $t=0$ and measuring time in years, our model is:\n\n$$u' = 0.98u$$\n\nand $u(0) = 1.0$. We encode this into Julia by noticing that, in this setup, we match the general form when" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f(u,p,t) = 0.98u" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "with $ u_0 = 1.0 $. If we want to solve this model on a time span from `t=0.0` to `t=1.0`, then we define an `ODEProblem` by specifying this function `f`, this initial condition `u0`, and this time span as follows:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nf(u,p,t) = 0.98u\nu0 = 1.0\ntspan = (0.0,1.0)\nprob = ODEProblem(f,u0,tspan)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To solve our `ODEProblem` we use the command `solve`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and that's it: we have succesfully solved our first ODE!\n\n#### Analyzing the Solution\n\nOf course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the [Solution Handling](http://docs.juliadiffeq.org/latest/basics/solution.html) page. Here we will describe some of the basics. 
You can plot the solution using the plot recipe provided by [Plots.jl](http://docs.juliaplots.org/latest/):" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the [Plots.jl attributes](http://docs.juliaplots.org/latest/attributes/). For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,linewidth=5,title=\"Solution to the linear ODE with a thick line\",\n xaxis=\"Time (t)\",yaxis=\"u(t) (in μm)\",label=\"My Thick Line!\") # legend=false" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Using the mutating `plot!` command we can add other pieces to our plot. For this ODE we know that the true solution is $u(t) = u_0 exp(at)$, so let's add some of the true solution to our plot:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label=\"True Solution!\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In the previous command I demonstrated `sol.t`, which grabs the array of time points that the solution was saved at:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.t" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can get the array of solution values using `sol.u`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.u" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "`sol.u[i]` is the value of the solution at time 
`sol.t[i]`. We can compute arrays of functions of the solution values using standard comprehensions, like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "[t+u for (u,t) in tuples(sol)]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "However, one interesting feature is that, by default, the solution is a continuous function. If we check the print out again:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "you see that it says that the solution has a order changing interpolation. The default algorithm automatically switches between methods in order to handle all types of problems. For non-stiff equations (like the one we are solving), it is a continuous function of 4th order accuracy. We can call the solution as a function of time `sol(t)`. For example, to get the value at `t=0.45`, we can use the command:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol(0.45)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Controlling the Solver\n\nDifferentialEquations.jl has a common set of solver controls among its algorithms which can be found [at the Common Solver Options](http://docs.juliadiffeq.org/latest/basics/common_solver_opts.html) page. We will detail some of the most widely used options.\n\nThe most useful options are the tolerances `abstol` and `reltol`. These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, `reltol` is the relative accuracy while `abstol` is the accuracy when `u` is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. 
Thus for the defaults `abstol=1e-6` and `reltol=1e-3`, you can expect a global accuracy of about 1-2 digits. If we want to get around 6 digits of accuracy, we can use the commands:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,abstol=1e-8,reltol=1e-8)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we can see no visible difference against the true solution:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol)\nplot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label=\"True Solution!\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that by decreasing the tolerance, the number of steps the solver had to take was `9` instead of the previous `5`. There is a trade off between accuracy and speed, and it is up to you to determine what is the right balance for your problem.\n\nAnother common option is to use `saveat` to make the solver save at specific time points. For example, if we want the solution at an even grid of `t=0.1k` for integers `k`, we would use the command:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,saveat=0.1)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that when `saveat` is used the continuous output variables are no longer saved and thus `sol(t)`, the interpolation, is only first order. We can save at an uneven grid of points by passing a collection of values to `saveat`. 
For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,saveat=[0.2,0.7,0.9])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we need to reduce the amount of saving, we can also turn off the continuous output directly via `dense=false`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,dense=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and to turn off all intermediate saving we can use `save_everystep=false`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we want to solve and only save the final value, we can even set `save_start=false`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,save_everystep=false,save_start = false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that similarly on the other side there is `save_end=false`.\n\nMore advanced saving behaviors, such as saving functionals of the solution, are handled via the `SavingCallback` in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html#SavingCallback-1) which will be addressed later in the tutorial.\n\n#### Choosing Solver Algorithms\n\nThere is no best algorithm for numerically solving a differential equation. When you call `solve(prob)`, DifferentialEquations.jl makes a guess at a good algorithm for your problem, given the properties that you ask for (the tolerances, the saving information, etc.). However, in many cases you may want more direct control. 
A later notebook will help introduce the various *algorithms* in DifferentialEquations.jl, but for now let's introduce the *syntax*.\n\nThe most crucial determining factor in choosing a numerical method is the stiffness of the model. Stiffness is roughly characterized by a Jacobian `f` with large eigenvalues. That's quite mathematical, and we can think of it more intuitively: if you have big numbers in `f` (like parameters of order `1e5`), then it's probably stiff. Or, as the creator of the MATLAB ODE Suite, Lawrence Shampine, likes to define it, if the standard algorithms are slow, then it's stiff. We will go into more depth about diagnosing stiffness in a later tutorial, but for now note that if you believe your model may be stiff, you can hint this to the algorithm chooser via `alg_hints = [:stiff]`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,alg_hints=[:stiff])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Stiff algorithms have to solve implicit equations and linear systems at each step so they should only be used when required.\n\nIf we want to choose an algorithm directly, you can pass the algorithm type after the problem as `solve(prob,alg)`. For example, let's solve this problem using the `Tsit5()` algorithm, and just for show let's change the relative tolerance to `1e-6` at the same time:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),reltol=1e-6)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Systems of ODEs: The Lorenz Equation\n\nNow let's move to a system of ODEs. The [Lorenz equation](https://en.wikipedia.org/wiki/Lorenz_system) is the famous \"butterfly attractor\" that spawned chaos theory. 
It is defined by the system of ODEs:\n\n$$\n\\begin{align}\n\\frac{dx}{dt} &= \\sigma (y - x)\\\\\n\\frac{dy}{dt} &= x (\\rho - z) -y\\\\\n\\frac{dz}{dt} &= xy - \\beta z\n\\end{align}\n$$\n\nTo define a system of differential equations in DifferentialEquations.jl, we define our `f` as a vector function with a vector initial condition. Thus, for the vector `u = [x,y,z]'`, we have the derivative function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz!(du,u,p,t)\n σ,ρ,β = p\n du[1] = σ*(u[2]-u[1])\n du[2] = u[1]*(ρ-u[3]) - u[2]\n du[3] = u[1]*u[2] - β*u[3]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice here we used the in-place format which writes the output to the preallocated vector `du`. For systems of equations the in-place format is faster. We use the initial condition $u_0 = [1.0,0.0,0.0]$ as follows:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.0,0.0,0.0]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Lastly, for this model we made use of the parameters `p`. We need to set this value in the `ODEProblem` as well. For our model we want to solve using the parameters $\\sigma = 10$, $\\rho = 28$, and $\\beta = 8/3$, and thus we build the parameter collection:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = (10,28,8/3) # we could also make this an array, or any other type!" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we generate the `ODEProblem` type. In this case, since we have parameters, we add the parameter values to the end of the constructor call. 
Let's solve this on a time span of `t=0` to `t=100`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "tspan = (0.0,100.0)\nprob = ODEProblem(lorenz!,u0,tspan,p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now, just as before, we solve the problem:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The same solution handling features apply to this case. Thus `sol.t` stores the time points and `sol.u` is an array storing the solution at the corresponding time points.\n\nHowever, there are a few extra features which are good to know when dealing with systems of equations. First of all, `sol` also acts like an array. `sol[i]` returns the solution at the `i`th time point." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.t[10],sol[10]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Additionally, the solution acts like a matrix where `sol[j,i]` is the value of the `j`th variable at time `i`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[2,10]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can get a real matrix by performing a conversion:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = Array(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is the same as sol, i.e. `sol[i,j] = A[i,j]`, but now it's a true matrix. 
Plotting will by default show the time series for each variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we instead want to plot values against each other, we can use the `vars` command. Let's plot variable `1` against variable `2` against variable `3`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is the classic Lorenz attractor plot, where the `x` axis is `u[1]`, the `y` axis is `u[2]`, and the `z` axis is `u[3]`. Note that the plot recipe by default uses the interpolation, but we can turn this off:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Yikes! This shows how calculating the continuous solution has saved a lot of computational effort by computing only a sparse solution and filling in the values! Note that in vars, `0=time`, and thus we can plot the time series of a single component like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(0,2))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### A DSL for Parameterized Functions\n\nIn many cases you may be defining a lot of functions with parameters. There exists the domain-specific language (DSL) defined by the `@ode_def` macro for helping with this common problem. 
For example, we can define the Lotka-Volterra equation:\n\n$$\n\\begin{align}\n\\frac{dx}{dt} &= ax - bxy\\\\\n\\frac{dy}{dt} &= -cy + dxy\n\\end{align}\n$$\n\nas follows:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lotka_volterra!(du,u,p,t)\n du[1] = p[1]*u[1] - p[2]*u[1]*u[2]\n du[2] = -p[3]*u[2] + p[4]*u[1]*u[2]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "However, that can be hard to follow since there's a lot of \"programming\" getting in the way. Instead, you can use the `@ode_def` macro from ParameterizedFunctions.jl:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using ParameterizedFunctions\nlv! = @ode_def LotkaVolterra begin\n dx = a*x - b*x*y\n dy = -c*y + d*x*y\nend a b c d" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can then use the result just like an ODE function from before:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.0,1.0]\np = (1.5,1.0,3.0,1.0)\ntspan = (0.0,10.0)\nprob = ODEProblem(lv!,u0,tspan,p)\nsol = solve(prob)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Not only is the DSL convenient syntax, but it does some magic behind the scenes. For example, further parts of the tutorial will describe how solvers for stiff differential equations have to make use of the Jacobian in calculations. Here, the DSL uses symbolic differentiation to automatically derive that function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "lv!.Jex" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The DSL can derive many other functions; this ability is used to speed up the solvers. 
An extension to DifferentialEquations.jl, [Latexify.jl](https://korsbo.github.io/Latexify.jl/latest/tutorials/parameterizedfunctions.html), allows you to extract these pieces as LaTeX expressions.\n\n## Internal Types\n\nThe last basic user-interface feature to explore is the choice of types. DifferentialEquations.jl respects your input types to determine the internal types that are used. Thus since in the previous cases, when we used `Float64` values for the initial condition, this meant that the internal values would be solved using `Float64`. We made sure that time was specified via `Float64` values, meaning that time steps would utilize 64-bit floats as well. But, by simply changing these types we can change what is used internally.\n\nAs a quick example, let's say we want to solve an ODE defined by a matrix. To do this, we can simply use a matrix as input." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = [1. 0 0 -5\n 4 -2 4 -3\n -4 0 0 1\n 5 -2 2 3]\nu0 = rand(4,2)\ntspan = (0.0,1.0)\nf(u,p,t) = A*u\nprob = ODEProblem(f,u0,tspan)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is no real difference from what we did before, but now in this case `u0` is a `4x2` matrix. Because of that, the solution at each time point is matrix:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[3]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In DifferentialEquations.jl, you can use any type that defines `+`, `-`, `*`, `/`, and has an appropriate `norm`. 
For example, if we want arbitrary precision floating point numbers, we can change the input to be a matrix of `BigFloat`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "big_u0 = big.(u0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and we can solve the `ODEProblem` with arbitrary precision numbers by using that initial condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,big_u0,tspan)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[1,3]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To really make use of this, we would want to change `abstol` and `reltol` to be small! Notice that the type for \"time\" is different than the type for the dependent variables, and this can be used to optimize the algorithm via keeping multiple precisions. We can convert time to be arbitrary precision as well by defining our time span with `BigFloat` variables:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,big_u0,big.(tspan))\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's end by showing a more complicated use of types. For small arrays, it's usually faster to do operations on static arrays via the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). The syntax is similar to that of normal arrays, but for these special arrays we utilize the `@SMatrix` macro to indicate we want to create a static array." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using StaticArrays\nA = @SMatrix [ 1.0 0.0 0.0 -5.0\n 4.0 -2.0 4.0 -3.0\n -4.0 0.0 0.0 1.0\n 5.0 -2.0 2.0 3.0]\nu0 = @SMatrix rand(4,2)\ntspan = (0.0,1.0)\nf(u,p,t) = A*u\nprob = ODEProblem(f,u0,tspan)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol[3]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Conclusion\n\nThese are the basic controls in DifferentialEquations.jl. All equations are defined via a problem type, and the `solve` command is used with an algorithm choice (or the default) to get a solution. Every solution acts the same, like an array `sol[i]` with `sol.t[i]`, and also like a continuous function `sol(t)` with a nice plot command `plot(sol)`. The Common Solver Options can be used to control the solver for any equation type. Lastly, the types used in the numerical solving are determined by the input types, and this can be used to solve with arbitrary precision and add additional optimizations (this can be used to solve via GPUs for example!). While this was shown on ODEs, these techniques generalize to other types of equations as well." 
- ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/02-choosing_algs.ipynb b/notebook/introduction/02-choosing_algs.ipynb deleted file mode 100644 index 3cdd600f..00000000 --- a/notebook/introduction/02-choosing_algs.ipynb +++ /dev/null @@ -1,163 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Choosing an ODE Algorithm\n### Chris Rackauckas\n\nWhile the default algorithms, along with `alg_hints = [:stiff]`, will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the [ODE Solvers](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html) page which goes into more depth.\n\n## Diagnosing Stiffness\n\nOne of the key things to know for algorithm choices is whether your problem is stiff. Let's take for example the driven Van Der Pol equation:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, ParameterizedFunctions\nvan! = @ode_def VanDerPol begin\n dy = μ*((1-x^2)*y - x)\n dx = 1*y\nend μ\n\nprob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "One indicating factor that should alert you to the fact that this model may be stiff is the fact that the parameter is `1e6`: large parameters generally mean stiff models. 
If we try to solve this with the default method:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here it shows that maximum iterations were reached. Another thing that can happen is that the solution can return that the solver was unstable (exploded to infinity) or that `dt` became too small. If these happen, the first thing to do is to check that your model is correct. It could very well be that you made an error that causes the model to be unstable!\n\nIf the model is the problem, then stiffness could be the reason. We can thus hint to the solver to use an appropriate method:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,alg_hints = [:stiff])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Or we can use the default algorithm. By default, DifferentialEquations.jl uses algorithms like `AutoTsit5(Rodas5())` which automatically detect stiffness and switch to an appropriate method once stiffness is known." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Another way to understand stiffness is to look at the solution." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots; gr()\nsol = solve(prob,alg_hints = [:stiff],reltol=1e-6)\nplot(sol,denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's zoom in on the y-axis to see what's going on:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,ylims = (-10.0,10.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice how there are some extreme vertical shifts that occur. These vertical shifts are places where the derivative term is very large, and this is indicative of stiffness. This is an extreme example to highlight the behavior, but this general idea can be carried over to your problem. When in doubt, simply try timing using both a stiff solver and a non-stiff solver and see which is more efficient.\n\nTo try this out, let's use BenchmarkTools, a package that let's us relatively reliably time code blocks." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz!(du,u,p,t)\n σ,ρ,β = p\n du[1] = σ*(u[2]-u[1])\n du[2] = u[1]*(ρ-u[3]) - u[2]\n du[3] = u[1]*u[2] - β*u[3]\nend\nu0 = [1.0,0.0,0.0]\np = (10,28,8/3)\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz!,u0,tspan,p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "And now, let's use the `@btime` macro from benchmark tools to compare the use of non-stiff and stiff solvers on this problem." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using BenchmarkTools\n@btime solve(prob);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@btime solve(prob,alg_hints = [:stiff]);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In this particular case, we can see that non-stiff solvers get us to the solution much more quickly.\n\n## The Recommended Methods\n\nWhen picking a method, the general rules are as follows:\n\n- Higher order is more efficient at lower tolerances, lower order is more efficient at higher tolerances\n- Adaptivity is essential in most real-world scenarios\n- Runge-Kutta methods do well with non-stiff equations, Rosenbrock methods do well with small stiff equations, BDF methods do well with large stiff equations\n\nWhile there are always exceptions to the rule, those are good guiding principles. Based on those, a simple way to choose methods is:\n\n- The default is `Tsit5()`, a non-stiff Runge-Kutta method of Order 5\n- If you use low tolerances (`1e-8`), try `Vern7()` or `Vern9()`\n- If you use high tolerances, try `BS3()`\n- If the problem is stiff, try `Rosenbrock23()`, `Rodas5()`, or `CVODE_BDF()`\n- If you don't know, use `AutoTsit5(Rosenbrock23())` or `AutoVern9(Rodas5())`.\n\n(This is a simplified version of the default algorithm chooser)\n\n## Comparison to other Software\n\nIf you are familiar with MATLAB, SciPy, or R's DESolve, here's a quick translation start to have transfer your knowledge over.\n\n- `ode23` -> `BS3()`\n- `ode45`/`dopri5` -> `DP5()`, though in most cases `Tsit5()` is more efficient\n- `ode23s` -> `Rosenbrock23()`, though in most cases `Rodas4()` is more efficient\n- `ode113` -> `VCABM()`, though in many cases `Vern7()` is more efficient\n- `dop853` -> `DP8()`, though in most cases `Vern7()` is more efficient\n- `ode15s`/`vode` -> `QNDF()`, though in many 
cases `CVODE_BDF()`, `Rodas4()`\n or `radau()` are more efficient\n- `ode23t` -> `Trapezoid()` for efficiency and `GenericTrapezoid()` for robustness\n- `ode23tb` -> `TRBDF2`\n- `lsoda` -> `lsoda()` (requires `]add LSODA; using LSODA`)\n- `ode15i` -> `IDA()`, though in many cases `Rodas4()` can handle the DAE and is\n significantly more efficient" - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/03-optimizing_diffeq_code.ipynb b/notebook/introduction/03-optimizing_diffeq_code.ipynb deleted file mode 100644 index baa02dc8..00000000 --- a/notebook/introduction/03-optimizing_diffeq_code.ipynb +++ /dev/null @@ -1,560 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Optimizing DiffEq Code\n### Chris Rackauckas\n\nIn this notebook we will walk through some of the main tools for optimizing your code in order to efficiently solve DifferentialEquations.jl. User-side optimizations are important because, for sufficiently difficult problems, most of the time will be spent inside of your `f` function, the function you are trying to solve. \"Efficient\" integrators are those that reduce the required number of `f` calls to hit the error tolerance. The main ideas for optimizing your DiffEq code, or any Julia function, are the following:\n\n- Make it non-allocating\n- Use StaticArrays for small arrays\n- Use broadcast fusion\n- Make it type-stable\n- Reduce redundant calculations\n- Make use of BLAS calls\n- Optimize algorithm choice\n\nWe'll discuss these strategies in the context of small and large systems. Let's start with small systems.\n\n## Optimizing Small Systems (<100 DEs)\n\nLet's take the classic Lorenz system from before. 
Let's start by naively writing the system in its out-of-place form:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz(u,p,t)\n dx = 10.0*(u[2]-u[1])\n dy = u[1]*(28.0-u[3]) - u[2]\n dz = u[1]*u[2] - (8/3)*u[3]\n [dx,dy,dz]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here, `lorenz` returns an object, `[dx,dy,dz]`, which is created within the body of `lorenz`.\n\nThis is a common code pattern from high-level languages like MATLAB, SciPy, or R's deSolve. However, the issue with this form is that it allocates a vector, `[dx,dy,dz]`, at each step. Let's benchmark the solution process with this choice of function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, BenchmarkTools\nu0 = [1.0;0.0;0.0]\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz,u0,tspan)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The BenchmarkTools package's `@benchmark` runs the code multiple times to get an accurate measurement. The minimum time is the time it takes when your OS and other background processes aren't getting in the way. Notice that in this case it takes about 5ms to solve and allocates around 11.11 MiB. However, if we were to use this inside of a real user code we'd see a lot of time spent doing garbage collection (GC) to clean up all of the arrays we made. Even if we turn off saving we have these allocations." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The problem of course is that arrays are created every time our derivative function is called. 
This function is called multiple times per step and is thus the main source of memory usage. To fix this, we can use the in-place form to ***make our code non-allocating***:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz!(du,u,p,t)\n du[1] = 10.0*(u[2]-u[1])\n du[2] = u[1]*(28.0-u[3]) - u[2]\n du[3] = u[1]*u[2] - (8/3)*u[3]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here, instead of creating an array each time, we utilized the cache array `du`. When the inplace form is used, DifferentialEquations.jl takes a different internal route that minimizes the internal allocations as well. When we benchmark this function, we will see quite a difference." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.0;0.0;0.0]\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz!,u0,tspan)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is a 4x time difference just from that change! Notice there are still some allocations and this is due to the construction of the integration cache. But this doesn't scale with the problem size:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "tspan = (0.0,500.0) # 5x longer than before\nprob = ODEProblem(lorenz!,u0,tspan)\n@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "since that's all just setup allocations.\n\n#### But if the system is small we can optimize even more.\n\nAllocations are only expensive if they are \"heap allocations\". 
For a more in-depth definition of heap allocations, [there are a lot of sources online](http://net-informations.com/faq/net/stack-heap.htm). But a good working definition is that heap allocations are variable-sized slabs of memory which have to be pointed to, and this pointer indirection costs time. Additionally, the heap has to be managed and the garbage controllers has to actively keep track of what's on the heap.\n\nHowever, there's an alternative to heap allocations, known as stack allocations. The stack is statically-sized (known at compile time) and thus its accesses are quick. Additionally, the exact block of memory is known in advance by the compiler, and thus re-using the memory is cheap. This means that allocating on the stack has essentially no cost!\n\nArrays have to be heap allocated because their size (and thus the amount of memory they take up) is determined at runtime. But there are structures in Julia which are stack-allocated. `struct`s for example are stack-allocated \"value-type\"s. `Tuple`s are a stack-allocated collection. The most useful data structure for DiffEq though is the `StaticArray` from the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). These arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using StaticArrays\nA = @SVector [2.0,3.0,5.0]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that the `3` after `SVector` gives the size of the `SVector`. It cannot be changed. Additionally, `SVector`s are immutable, so we have to create a new `SVector` to change values. But remember, we don't have to worry about allocations because this data structure is stack-allocated. 
`SArray`s have a lot of extra optimizations as well: they have fast matrix multiplication, fast QR factorizations, etc. which directly make use of the information about the size of the array. Thus, when possible they should be used.\n\nUnfortunately static arrays can only be used for sufficiently small arrays. After a certain size, they are forced to heap allocate after some instructions and their compile time balloons. Thus static arrays shouldn't be used if your system has more than 100 variables. Additionally, only the native Julia algorithms can fully utilize static arrays.\n\nLet's ***optimize `lorenz` using static arrays***. Note that in this case, we want to use the out-of-place allocating form, but this time we want to output a static array:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz_static(u,p,t)\n dx = 10.0*(u[2]-u[1])\n dy = u[1]*(28.0-u[3]) - u[2]\n dz = u[1]*u[2] - (8/3)*u[3]\n @SVector [dx,dy,dz]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To make the solver internally use static arrays, we simply give it a static array as the initial condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = @SVector [1.0,0.0,0.0]\ntspan = (0.0,100.0)\nprob = ODEProblem(lorenz_static,u0,tspan)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "And that's pretty much all there is to it. With static arrays you don't have to worry about allocating, so use operations like `*` and don't worry about fusing operations (discussed in the next section). 
Do \"the vectorized code\" of R/MATLAB/Python and your code in this case will be fast, or directly use the numbers/values.\n\n#### Exercise 1\n\nImplement the out-of-place array, in-place array, and out-of-place static array forms for the [Henon-Heiles System](https://en.wikipedia.org/wiki/H%C3%A9non%E2%80%93Heiles_system) and time the results.\n\n## Optimizing Large Systems\n\n### Interlude: Managing Allocations with Broadcast Fusion\n\nWhen your system is sufficiently large, or you have to make use of a non-native Julia algorithm, you have to make use of `Array`s. In order to use arrays in the most efficient manner, you need to be careful about temporary allocations. Vectorized calculations naturally have plenty of temporary array allocations. This is because a vectorized calculation outputs a vector. Thus:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000)\ntest(A,B,C) = A + B + C\n@benchmark test(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That expression `A + B + C` creates 2 arrays. It first creates one for the output of `A + B`, then uses that result array to `+ C` to get the final result. 2 arrays! We don't want that! The first thing to do to fix this is to use broadcast fusion. [Broadcast fusion](https://julialang.org/blog/2017/01/moredots) puts expressions together. For example, instead of doing the `+` operations separately, if we were to add them all at the same time, then we would only have a single array that's created. 
For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C)\n@benchmark test2(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Puts the whole expression into a single function call, and thus only one array is required to store output. This is the same as writing the loop:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function test3(A,B,C)\n D = similar(A)\n @inbounds for i in eachindex(A)\n D[i] = A[i] + B[i] + C[i]\n end\n D\nend\n@benchmark test3(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "However, Julia's broadcast is syntactic sugar for this. If multiple expressions have a `.`, then it will put those vectorized operations together. Thus:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test4(A,B,C) = A .+ B .+ C\n@benchmark test4(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "is a version with only 1 array created (the output). Note that `.`s can be used with function calls as well:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sin.(A) .+ sin.(B)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Also, the `@.` macro applys a dot to every operator:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test5(A,B,C) = @. A + B + C #only one array allocated\n@benchmark test5(A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Using these tools we can get rid of our intermediate array allocations for many vectorized function calls. But we are still allocating the output array. 
To get rid of that allocation, we can instead use mutation. Mutating broadcast is done via `.=`. For example, if we pre-allocate the output:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "D = zeros(1000,1000);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Then we can keep re-using this cache for subsequent calculations. The mutating broadcasting form is:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated\n@benchmark test6!(D,A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "If we use `@.` before the `=`, then it will turn it into `.=`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test7!(D,A,B,C) = @. D = A + B + C #only one array allocated\n@benchmark test7!(D,A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that in this case, there is no \"output\", and instead the values inside of `D` are what are changed (like with the DiffEq inplace function). Many Julia functions have a mutating form which is denoted with a `!`. For example, the mutating form of the `map` is `map!`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C)\n@benchmark test8!(D,A,B,C)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Some operations require using an alternate mutating form in order to be fast. 
For example, matrix multiplication via `*` allocates a temporary:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark A*B" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Instead, we can use the mutating form `mul!` into a cache array to avoid allocating the output:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using LinearAlgebra\n@benchmark mul!(D,A,B) # same as D = A * B" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "For repeated calculations this reduced allocation can stop GC cycles and thus lead to more efficient code. Additionally, ***we can fuse together higher level linear algebra operations using BLAS***. The package [SugarBLAS.jl](https://github.com/lopezm94/SugarBLAS.jl) makes it easy to write higher level operations like `alpha*B*A + beta*C` as mutating BLAS calls.\n\n### Example Optimization: Gierer-Meinhardt Reaction-Diffusion PDE Discretization\n\nLet's optimize the solution of a Reaction-Diffusion PDE's discretization. In its discretized form, this is the ODE:\n\n$$\n\\begin{align}\ndu &= D_1 (A_y u + u A_x) + \\frac{au^2}{v} + \\bar{u} - \\alpha u\\\\\ndv &= D_2 (A_y v + v A_x) + a u^2 + \\beta v\n\\end{align}\n$$\n\nwhere $u$, $v$, and $A$ are matrices. Here, we will use the simplified version where $A$ is the tridiagonal stencil $[1,-2,1]$, i.e. it's the 2D discretization of the LaPlacian. 
The native code would be something along the lines of:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Generate the constants\np = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2\nN = 100\nAx = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1]))\nAy = copy(Ax)\nAx[2,1] = 2.0\nAx[end-1,end] = 2.0\nAy[1,2] = 2.0\nAy[end,end-1] = 2.0\n\nfunction basic_version!(dr,r,p,t)\n a,α,ubar,β,D1,D2 = p\n u = r[:,:,1]\n v = r[:,:,2]\n Du = D1*(Ay*u + u*Ax)\n Dv = D2*(Ay*v + v*Ax)\n dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u\n dr[:,:,2] = Dv .+ a.*u.*u .- β*v\nend\n\na,α,ubar,β,D1,D2 = p\nuss = (ubar+β)/α\nvss = (a/β)*uss^2\nr0 = zeros(100,100,2)\nr0[:,:,1] .= uss.+0.1.*rand.()\nr0[:,:,2] .= vss\n\nprob = ODEProblem(basic_version!,r0,(0.0,0.1),p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In this version we have encoded our initial condition to be a 3-dimensional array, with `u[:,:,1]` being the `A` part and `u[:,:,2]` being the `B` part." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "While this version isn't very efficient,\n\n#### We recommend writing the \"high-level\" code first, and iteratively optimizing it!\n\nThe first thing that we can do is get rid of the slicing allocations. The operation `r[:,:,1]` creates a temporary array instead of a \"view\", i.e. a pointer to the already existing memory. To make it a view, add `@view`. 
Note that we have to be careful with views because they point to the same memory, and thus changing a view changes the original values:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "A = rand(4)\n@show A\nB = @view A[1:3]\nB[2] = 2\n@show A" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that changing `B` changed `A`. This is something to be careful of, but at the same time we want to use this since we want to modify the output `dr`. Additionally, the last statement is a purely element-wise operation, and thus we can make use of broadcast fusion there. Let's rewrite `basic_version!` to ***avoid slicing allocations*** and to ***use broadcast fusion***:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function gm2!(dr,r,p,t)\n a,α,ubar,β,D1,D2 = p\n u = @view r[:,:,1]\n v = @view r[:,:,2]\n du = @view dr[:,:,1]\n dv = @view dr[:,:,2]\n Du = D1*(Ay*u + u*Ax)\n Dv = D2*(Ay*v + v*Ax)\n @. du = Du + a.*u.*u./v + ubar - α*u\n @. dv = Dv + a.*u.*u - β*v\nend\nprob = ODEProblem(gm2!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now, most of the allocations are taking place in `Du = D1*(Ay*u + u*Ax)` since those operations are vectorized and not mutating. We should instead replace the matrix multiplications with `mul!`. When doing so, we will need to have cache variables to write into. This looks like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "Ayu = zeros(N,N)\nuAx = zeros(N,N)\nDu = zeros(N,N)\nAyv = zeros(N,N)\nvAx = zeros(N,N)\nDv = zeros(N,N)\nfunction gm3!(dr,r,p,t)\n a,α,ubar,β,D1,D2 = p\n u = @view r[:,:,1]\n v = @view r[:,:,2]\n du = @view dr[:,:,1]\n dv = @view dr[:,:,2]\n mul!(Ayu,Ay,u)\n mul!(uAx,u,Ax)\n mul!(Ayv,Ay,v)\n mul!(vAx,v,Ax)\n @. Du = D1*(Ayu + uAx)\n @. 
Dv = D2*(Ayv + vAx)\n @. du = Du + a*u*u./v + ubar - α*u\n @. dv = Dv + a*u*u - β*v\nend\nprob = ODEProblem(gm3!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "But our temporary variables are global variables. We need to either declare the caches as `const` or localize them. We can localize them by adding them to the parameters, `p`. It's easier for the compiler to reason about local variables than global variables. ***Localizing variables helps to ensure type stability***." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2\nfunction gm4!(dr,r,p,t)\n a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p\n u = @view r[:,:,1]\n v = @view r[:,:,2]\n du = @view dr[:,:,1]\n dv = @view dr[:,:,2]\n mul!(Ayu,Ay,u)\n mul!(uAx,u,Ax)\n mul!(Ayv,Ay,v)\n mul!(vAx,v,Ax)\n @. Du = D1*(Ayu + uAx)\n @. Dv = D2*(Ayv + vAx)\n @. du = Du + a*u*u./v + ubar - α*u\n @. dv = Dv + a*u*u - β*v\nend\nprob = ODEProblem(gm4!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We could then use the BLAS `gemmv` to optimize the matrix multiplications some more, but instead let's devectorize the stencil." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = (1.0,1.0,1.0,10.0,0.001,100.0,N)\nfunction fast_gm!(du,u,p,t)\n a,α,ubar,β,D1,D2,N = p\n\n @inbounds for j in 2:N-1, i in 2:N-1\n du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n\n @inbounds for j in 2:N-1, i in 2:N-1\n du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n\n @inbounds for j in 2:N-1\n i = 1\n du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for j in 2:N-1\n i = 1\n du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n @inbounds for j in 2:N-1\n i = N\n du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for j in 2:N-1\n i = N\n du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n\n @inbounds for i in 2:N-1\n j = 1\n du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for i in 2:N-1\n j = 1\n du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n @inbounds for i in 2:N-1\n j = N\n du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n end\n @inbounds for i in 2:N-1\n j = N\n du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\n\n @inbounds begin\n i = 1; j = 1\n du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n\n i = 1; j = N\n du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 
4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n\n i = N; j = 1\n du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n\n i = N; j = N\n du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) +\n a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1]\n du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) +\n a*u[i,j,1]^2 - β*u[i,j,2]\n end\nend\nprob = ODEProblem(fast_gm!,r0,(0.0,0.1),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Lastly, we can do other things like multithread the main loops, but these optimizations get the last 2x-3x out. The main optimizations which apply everywhere are the ones we just performed (though the last one only works if your matrix is a stencil. This is known as a matrix-free implementation of the PDE discretization).\n\nThis gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code!\n\nThe last thing to do is then ***optimize our algorithm choice***. We have been using `Tsit5()` as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use `CVODE_BDF()`. However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. 
`CVODE_BDF` allows us to use a sparse Newton-Krylov solver by setting `linear_solver = :GMRES` (see [the solver documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1), and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p)\n@benchmark solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Sundials\n@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p)\n# Will go out of memory if we don't turn off `save_everystep`!\n@benchmark solve(prob,Tsit5(),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's check the allocation growth." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p)\n@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we've elimated almost all allocations, allowing the code to grow without hitting garbage collection and slowing down.\n\nWhy is `CVODE_BDF` doing well? 
What's happening is that, because the problem is stiff, the number of steps required by the explicit Runge-Kutta method grows rapidly, whereas `CVODE_BDF` is taking large steps. Additionally, the `GMRES` linear solver form is quite an efficient way to solve the implicit system in this case. This is problem-dependent, and in many cases using a Krylov method effectively requires a preconditioner, so you need to play around with testing other algorithms and linear solvers to find out what works best with your problem.\n\n## Conclusion\n\nJulia gives you the tools to optimize the solver \"all the way\", but you need to make use of it. The main thing to avoid is temporary allocations. For small systems, this is effectively done via static arrays. For large systems, this is done via in-place operations and cache arrays. Either way, the resulting solution can be immensely sped up over vectorized formulations by using these principles." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/04-callbacks_and_events.ipynb b/notebook/introduction/04-callbacks_and_events.ipynb deleted file mode 100644 index 92417876..00000000 --- a/notebook/introduction/04-callbacks_and_events.ipynb +++ /dev/null @@ -1,551 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Callbacks and Events\n### Chris Rackauckas\n\nIn working with a differential equation, our system will evolve through many states. Particular states of the system may be of interest to us, and we say that an ***\"event\"*** is triggered when our system reaches these states. For example, events may include the moment when our system reaches a particular temperature or velocity. 
We ***handle*** these events with ***callbacks***, which tell us what to do once an event has been triggered.\n\nThese callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserve conservation laws and save the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers.\n\nThis tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the [Event Handling and Callback Functions](http://docs.juliadiffeq.org/latest/features/callback_functions.html) page of the documentation. We will also introduce you to some of the most widely used callbacks in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html), which is a library of pre-built mods.\n\n## Events and Continuous Callbacks\n\nEvent handling is done through continuous callbacks. Callbacks take a function, `condition`, which triggers an `affect!` when `condition == 0`. These callbacks are called \"continuous\" because they will utilize rootfinding on the interpolation to find the \"exact\" time point at which the condition takes place and apply the `affect!` at that time point.\n\n***Let's use a bouncing ball as a simple system to explain events and callbacks.*** Let's take Newton's model of a ball falling towards the Earth's surface via a gravitational constant `g`. In this case, the velocity is changing via `-g`, and position is changing via the velocity. Therefore we receive the system of ODEs:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, ParameterizedFunctions\nball! 
= @ode_def BallBounce begin\n dy = v\n dv = -g\nend g" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We want the callback to trigger when `y=0` since that's when the ball will hit the Earth's surface (our event). We do this with the condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function condition(u,t,integrator)\n u[1]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Recall that the `condition` will trigger when it evaluates to zero, and here it will evaluate to zero when `u[1] == 0`, which occurs when `v == 0`. *Now we have to say what we want the callback to do.* Callbacks make use of the [Integrator Interface](http://docs.juliadiffeq.org/latest/basics/integrator.html). Instead of giving a full description, a quick and usable rundown is:\n\n- Values are strored in `integrator.u`\n- Times are stored in `integrator.t`\n- The parameters are stored in `integrator.p`\n- `integrator(t)` performs an interpolation in the current interval between `integrator.tprev` and `integrator.t` (and allows extrapolation)\n- User-defined options (tolerances, etc.) are stored in `integrator.opts`\n- `integrator.sol` is the current solution object. Note that `integrator.sol.prob` is the current problem\n\nWhile there's a lot more on the integrator interface page, that's a working knowledge of what's there.\n\nWhat we want to do with our `affect!` is to \"make the ball bounce\". Mathematically speaking, the ball bounces when the sign of the velocity flips. As an added behavior, let's also use a small friction constant to dampen the ball's velocity. This way only a percentage of the velocity will be retained when the event is triggered and the callback is used. 
We'll define this behavior in the `affect!` function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function affect!(integrator)\n integrator.u[2] = -integrator.p[2] * integrator.u[2]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "`integrator.u[2]` is the second value of our model, which is `v` or velocity, and `integrator.p[2]`, is our friction coefficient.\n\nTherefore `affect!` can be read as follows: `affect!` will take the current value of velocity, and multiply it `-1` multiplied by our friction coefficient. Therefore the ball will change direction and its velocity will dampen when `affect!` is called.\n\nNow let's build the `ContinuousCallback`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bounce_cb = ContinuousCallback(condition,affect!)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's make an `ODEProblem` which has our callback:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [50.0,0.0]\ntspan = (0.0,15.0)\np = (9.8,0.9)\nprob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we chose a friction constant of `0.9`. Now we can solve the problem and plot the solution as we normally would:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and tada, the ball bounces! Notice that the `ContinuousCallback` is using the interpolation to apply the effect \"exactly\" when `v == 0`. 
This is crucial for model correctness, and thus when this property is needed a `ContinuousCallback` should be used.\n\n#### Exercise 1\n\nIn our example we used a constant coefficient of friction, but if we are bouncing the ball in the same place we may be smoothing the surface (say, squishing the grass), causing there to be less friction after each bounce. In this more advanced model, we want the friction coefficient at the next bounce to be `sqrt(friction)` from the previous bounce (since `friction < 1`, `sqrt(friction) > friction` and `sqrt(friction) < 1`).\n\nHint: there are many ways to implement this. One way to do it is to make `p` a `Vector` and mutate the friction coefficient in the `affect!`.\n\n## Discrete Callbacks\n\nA discrete callback checks a `condition` after every integration step and, if true, it will apply an `affect!`. For example, let's say that at time `t=2` we want to include that a kid kicked the ball, adding `20` to the current velocity. This kind of situation, where we want to add a specific behavior which does not require rootfinding, is a good candidate for a `DiscreteCallback`. In this case, the `condition` is a boolean for whether to apply the `affect!`, so:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function condition_kick(u,t,integrator)\n t == 2\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We want the kick to occur at `t=2`, so we check for that time point. 
When we are at this time point, we want to do:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function affect_kick!(integrator)\n integrator.u[2] += 50\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we build the problem as before:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "kick_cb = DiscreteCallback(condition_kick,affect_kick!)\nu0 = [50.0,0.0]\ntspan = (0.0,10.0)\np = (9.8,0.9)\nprob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that, since we are requiring our effect at exactly the time `t=2`, we need to tell the integration scheme to step at exactly `t=2` to apply this callback. This is done via the option `tstops`, which is like `saveat` but means \"stop at these values\"." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),tstops=[2.0])\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that this example could've been done with a `ContinuousCallback` by checking the condition `t-2`.\n\n## Merging Callbacks with Callback Sets\n\nIn some cases you may want to merge callbacks to build up more complex behavior. In our previous result, notice that the model is unphysical because the ball goes below zero! What we really need to do is add the bounce callback together with the kick. This can be achieved through the `CallbackSet`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "cb = CallbackSet(bounce_cb,kick_cb)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "A `CallbackSet` merges their behavior together. The logic is as follows. 
In a given interval, if there are multiple continuous callbacks that would trigger, only the one that triggers at the earliest time is used. The time is pulled back to where that continuous callback is triggered, and then the `DiscreteCallback`s in the callback set are called in order." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [50.0,0.0]\ntspan = (0.0,15.0)\np = (9.8,0.9)\nprob = ODEProblem(ball!,u0,tspan,p,callback=cb)\nsol = solve(prob,Tsit5(),tstops=[2.0])\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we have now merged the behaviors. We can then nest this as deep as we like.\n\n#### Exercise 2\n\nAdd to the model a linear wind with resistance that changes the acceleration to `-g + k*v` after `t=10`. Do so by adding another parameter and allowing it to be zero until a specific time point where a third callback triggers the change.\n\n## Integration Termination and Directional Handling\n\nLet's look at another model now: the model of the [Harmonic Oscillator](https://en.wikipedia.org/wiki/Harmonic_oscillator). We can write this as:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u0 = [1.,0.]\nharmonic! = @ode_def HarmonicOscillator begin\n dv = -x\n dx = v\nend\ntspan = (0.0,10.0)\nprob = ODEProblem(harmonic!,u0,tspan)\nsol = solve(prob)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's instead stop the integration when a condition is met. From the [Integrator Interface stepping controls](http://docs.juliadiffeq.org/latest/basics/integrator.html#Stepping-Controls-1) we see that `terminate!(integrator)` will cause the integration to end. 
So our new `affect!` is simply:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function terminate_affect!(integrator)\n terminate!(integrator)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's first stop the integration when the particle moves back to `x=0`. This means we want to use the condition:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function terminate_condition(u,t,integrator)\n u[2]\nend\nterminate_cb = ContinuousCallback(terminate_condition,terminate_affect!)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that instead of adding callbacks to the problem, we can also add them to the `solve` command. This will automatically form a `CallbackSet` with any problem-related callbacks and naturally allows you to distinguish between model features and integration controls." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,callback=terminate_cb)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that the harmonic oscilator's true solution here is `sin` and `cosine`, and thus we would expect this return to zero to happen at `t=π`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol.t[end]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is one way to approximate π! Lower tolerances and arbitrary precision numbers can make this more exact, but let's not look at that. Instead, what if we wanted to halt the integration after exactly one cycle? To do so we would need to ignore the first zero-crossing. Luckily in these types of scenarios there's usually a structure to the problem that can be exploited. 
Here, we only want to trigger the `affect!` when crossing from positive to negative, and not when crossing from negative to positive. In other words, we want our `affect!` to only occur on upcrossings.\n\nIf the `ContinuousCallback` constructor is given a single `affect!`, it will occur on both upcrossings and downcrossings. If there are two `affect!`s given, then the first is for upcrossings and the second is for downcrossings. An `affect!` can be ignored by using `nothing`. Together, the \"upcrossing-only\" version of the effect means that the first `affect!` is what we defined above and the second is `nothing`. Therefore we want:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Which gives us:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,callback=terminate_upcrossing_cb)\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Callback Library\n\nAs you can see, callbacks can be very useful and through `CallbackSets` we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html). We will walk through a few examples where these callbacks can come in handy.\n\n### Manifold Projection\n\nOne callback is the manifold projection callback. Essentially, you can define any manifold `g(sol)=0` which the solution must live on, and cause the integration to project to that manifold after every step. 
As an example, let's see what happens if we naively run the harmonic oscillator for a long time:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "tspan = (0.0,10000.0)\nprob = ODEProblem(harmonic!,u0,tspan)\nsol = solve(prob)\ngr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points!\nplot(sol,vars=(1,2))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(0,1),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that what's going on is that the numerical solution is drifting from the true solution over this long time scale. This is because the integrator is not conserving energy." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Some integration techniques like [symplectic integrators](http://docs.juliadiffeq.org/latest/solvers/dynamical_solve.html#Symplectic-Integrators-1) are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function g(resid,u,p,t)\n resid[1] = u[2]^2 + u[1]^2 - 1\n resid[2] = 0\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here the residual measures how far from our desired energy we are, and the number of conditions matches the size of our system (we ignored the second one by making the residual 0). 
Thus we define a `ManifoldProjection` callback and add that to the solver:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "cb = ManifoldProjection(g)\nsol = solve(prob,callback=cb)\nplot(sol,vars=(1,2))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(0,1),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we have \"perfect\" energy conservation, where if it's ever violated too much the solution will get projected back to `energy=1`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u1,u2 = sol[500]\nu2^2 + u1^2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the [`PositiveCallback()`](http://docs.juliadiffeq.org/latest/features/callback_library.html#PositiveDomain-1) which can be used to enforce positivity of the variables.\n\n### SavingCallback\n\nThe `SavingCallback` can be used to allow for special saving behavior. Let's take a linear ODE define on a system of 1000x1000 matrices:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In fields like quantum mechanics you may only want to know specific properties of the solution such as the trace or the norm of the matrix. Saving all of the 1000x1000 matrices can be a costly way to get this information! 
Instead, we can use the `SavingCallback` to save the `trace` and `norm` at specified times. To do so, we first define our `SavedValues` cache. Our time is in terms of `Float64`, and we want to save tuples of `Float64`s (one for the `trace` and one for the `norm`), and thus we generate the cache as:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values = SavedValues(Float64, Tuple{Float64,Float64})" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we define the `SavingCallback` by giving it a function of `(u,p,t,integrator)` that returns the values to save, and the cache:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using LinearAlgebra\ncb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here we take `u` and save `(tr(u),norm(u))`. When we solve with this callback:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Our values are stored in our `saved_values` variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values.t" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values.saveval" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "By default this happened only at the solver's steps. But the `SavingCallback` has similar controls as the integrator. 
For example, if we want to save at every `0.1` seconds, we do can so using `saveat`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache\ncb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0)\nsol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values.t" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "saved_values.saveval" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Exercise 3\n\nGo back to the Harmonic oscillator. Use the `SavingCallback` to save an array for the energy over time, and do this both with and without the `ManifoldProjection`. Plot the results to see the difference the projection makes." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/introduction/05-formatting_plots.ipynb b/notebook/introduction/05-formatting_plots.ipynb deleted file mode 100644 index fa2f5342..00000000 --- a/notebook/introduction/05-formatting_plots.ipynb +++ /dev/null @@ -1,211 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Formatting Plots\n### Chris Rackauckas\n\nSince the plotting functionality is implemented as a recipe to Plots.jl, [all of the options open to Plots.jl can be used in our plots](https://juliaplots.github.io/supported/). 
In addition, there are special features specifically for [differential equation plots](http://docs.juliadiffeq.org/latest/basics/plot.html). This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling `solve` on the problem, and `plot` on the solution:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Plots, ParameterizedFunctions\ngr()\nlorenz = @ode_def Lorenz begin\n dx = σ*(y-x)\n dy = ρ*x-y-x*z\n dz = x*y-β*z\nend σ β ρ\n\np = [10.0,8/3,28]\nu0 = [1., 5., 10.]\ntspan = (0., 100.)\nprob = ODEProblem(lorenz, u0, tspan, p)\nsol = solve(prob)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's change it to a phase plot. As discussed in the [plot functions page](http://docs.juliadiffeq.org/latest/basics/plot.html), we can use the `vars` command to choose the variables to plot. Let's plot variable `x` vs variable `y` vs variable `z`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1, 2, 3))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can also choose to plot the timeseries for a single variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=[:x])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we were able to use the variable names because we had defined the problem with the macro. But in general, we can use the indices. 
The previous plots would be:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3))\nplot(sol,vars=[1])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Common options are to add titles, axis, and labels. For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,linewidth=5,title=\"Solution to the linear ODE with a thick line\",\nxaxis=\"Time (t)\",yaxis=\"u(t) (in mm)\",label=[\"X\",\"Y\",\"Z\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that series recipes apply to the solution type as well. For example, we can use a scatter plot on the timeseries:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "scatter(sol,vars=[:x])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This shows that the recipe is using the interpolation to smooth the plot. It becomes abundantly clear when we turn it off using `denseplot=false`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),denseplot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "When this is done, only the values the timestep hits are plotted. Using the interpolation usually results in a much nicer looking plot so it's recommended, and since the interpolations have similar orders to the numerical methods, their results are trustworthy on the full interval. 
We can control the number of points used in the interpolation's plot using the `plotdensity` command:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),plotdensity=100)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That's plotting the entire solution using 100 points spaced evenly in time." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3),plotdensity=10000)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That's more like it! By default it uses `100*length(sol)`, where the length is the number of internal steps it had to take. This heuristic usually does well, but unusually difficult equations it can be relaxed (since it will take small steps), and for equations with events / discontinuities raising the plot density can help resolve the discontinuity.\n\nLastly notice that we can compose plots. Let's show where the 100 points are using a scatter plot:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol,vars=(1,2,3))\nscatter!(sol,vars=(1,2,3),plotdensity=100)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can instead work with an explicit plot object. This form can be better for building a complex plot in a loop." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = plot(sol,vars=(1,2,3))\nscatter!(p,sol,vars=(1,2,3),plotdensity=100)\ntitle!(\"I added a title\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "You can do all sorts of things. Have fun!" 
- ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/01-classical_physics.ipynb b/notebook/models/01-classical_physics.ipynb deleted file mode 100644 index ef912dbe..00000000 --- a/notebook/models/01-classical_physics.ipynb +++ /dev/null @@ -1,240 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Classical Physics Models\n### Yingbo Ma, Chris Rackauckas\n\nIf you're getting some cold feet to jump in to DiffEq land, here are some handcrafted differential equations mini problems to hold your hand along the beginning of your journey.\n\n## Radioactive Decay of Carbon-14\n\n#### First order linear ODE\n\n$$f(t,u) = \\frac{du}{dt}$$\n\nThe Radioactive decay problem is the first order linear ODE problem of an exponential with a negative coefficient, which represents the half-life of the process in question. Should the coefficient be positive, this would represent a population growth equation." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using OrdinaryDiffEq, Plots\ngr()\n\n#Half-life of Carbon-14 is 5,730 years.\nC₁ = 5.730\n\n#Setup\nu₀ = 1.0\ntspan = (0.0, 1.0)\n\n#Define the problem\nradioactivedecay(u,p,t) = -C₁*u\n\n#Pass to solver\nprob = ODEProblem(radioactivedecay,u₀,tspan)\nsol = solve(prob,Tsit5())\n\n#Plot\nplot(sol,linewidth=2,title =\"Carbon-14 half-life\", xaxis = \"Time in thousands of years\", yaxis = \"Percentage left\", label = \"Numerical Solution\")\nplot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label=\"Analytical Solution\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Simple Pendulum\n\n#### Second Order Linear ODE\n\nWe will start by solving the pendulum problem. In the physics class, we often solve this problem by small angle approximation, i.e. $ sin(\\theta) \\approx \\theta$, because otherwise, we get an elliptic integral which doesn't have an analytic solution. The linearized form is\n\n$$\\ddot{\\theta} + \\frac{g}{L}{\\theta} = 0$$\n\nBut we have numerical ODE solvers! Why not solve the *real* pendulum?\n\n$$\\ddot{\\theta} + \\frac{g}{L}{\\sin(\\theta)} = 0$$" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Simple Pendulum Problem\nusing OrdinaryDiffEq, Plots\n\n#Constants\nconst g = 9.81\nL = 1.0\n\n#Initial Conditions\nu₀ = [0,π/2]\ntspan = (0.0,6.3)\n\n#Define the problem\nfunction simplependulum(du,u,p,t)\n θ = u[1]\n dθ = u[2]\n du[1] = dθ\n du[2] = -(g/L)*sin(θ)\nend\n\n#Pass to solvers\nprob = ODEProblem(simplependulum,u₀, tspan)\nsol = solve(prob,Tsit5())\n\n#Plot\nplot(sol,linewidth=2,title =\"Simple Pendulum Problem\", xaxis = \"Time\", yaxis = \"Height\", label = [\"Theta\",\"dTheta\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "So now we know that behaviour of the position versus time. 
However, it will be useful to us to look at the phase space of the pendulum, i.e., and representation of all possible states of the system in question (the pendulum) by looking at its velocity and position. Phase space analysis is ubiquitous in the analysis of dynamical systems, and thus we will provide a few facilities for it." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = plot(sol,vars = (1,2), xlims = (-9,9), title = \"Phase Space Plot\", xaxis = \"Velocity\", yaxis = \"Position\", leg=false)\nfunction phase_plot(prob, u0, p, tspan=2pi)\n _prob = ODEProblem(prob.f,u0,(0.0,tspan))\n sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy\n plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing)\nend\nfor i in -4pi:pi/2:4π\n for j in -4pi:pi/2:4π\n phase_plot(prob, [j,i], p)\n end\nend\nplot(p,xlims = (-9,9))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Simple Harmonic Oscillator\n\n### Double Pendulum" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Double Pendulum Problem\nusing OrdinaryDiffEq, Plots\n\n#Constants and setup\nconst m₁, m₂, L₁, L₂ = 1, 2, 1, 2\ninitial = [0, π/3, 0, 3pi/5]\ntspan = (0.,50.)\n\n#Convenience function for transforming from polar to Cartesian coordinates\nfunction polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4))\n u = sol.t[1]:dt:sol.t[end]\n\n p1 = l1*map(x->x[vars[1]], sol.(u))\n p2 = l2*map(y->y[vars[2]], sol.(u))\n\n x1 = l1*sin.(p1)\n y1 = l1*-cos.(p1)\n (u, (x1 + l2*sin.(p2),\n y1 - l2*cos.(p2)))\nend\n\n#Define the Problem\nfunction double_pendulum(xdot,x,p,t)\n xdot[1]=x[2]\n xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2)))\n xdot[3]=x[4]\n xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2))\nend\n\n#Pass 
to Solvers\ndouble_pendulum_problem = ODEProblem(double_pendulum, initial, tspan)\nsol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Obtain coordinates in Cartesian Geometry\nts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01)\nplot(ps...)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Poincaré section\n\nThe Poincaré section is a contour plot of a higher-dimensional phase space diagram. It helps to understand the dynamic interactions and is wonderfully pretty.\n\nThe following equation came from [StackOverflow question](https://mathematica.stackexchange.com/questions/40122/help-to-plot-poincar%C3%A9-section-for-double-pendulum)\n\n$$\\frac{d}{dt}\n \\begin{pmatrix}\n \\alpha \\\\ l_\\alpha \\\\ \\beta \\\\ l_\\beta\n \\end{pmatrix}=\n \\begin{pmatrix}\n 2\\frac{l_\\alpha - (1+\\cos\\beta)l_\\beta}{3-\\cos 2\\beta} \\\\\n -2\\sin\\alpha - \\sin(\\alpha + \\beta) \\\\\n 2\\frac{-(1+\\cos\\beta)l_\\alpha + (3+2\\cos\\beta)l_\\beta}{3-\\cos2\\beta}\\\\\n -\\sin(\\alpha+\\beta) - 2\\sin(\\beta)\\frac{(l_\\alpha-l_\\beta)l_\\beta}{3-\\cos2\\beta} + 2\\sin(2\\beta)\\frac{l_\\alpha^2-2(1+\\cos\\beta)l_\\alpha l_\\beta + (3+2\\cos\\beta)l_\\beta^2}{(3-\\cos2\\beta)^2}\n \\end{pmatrix}$$\n\nThe Poincaré section here is the collection of $(β,l_β)$ when $α=0$ and $\\frac{dα}{dt}>0$.\n\n#### Hamiltonian of a double pendulum\nNow we will plot the Hamiltonian of a double pendulum" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Constants and setup\nusing OrdinaryDiffEq\ninitial2 = [0.01, 0.005, 0.01, 0.01]\ntspan2 = (0.,200.)\n\n#Define the problem\nfunction double_pendulum_hamiltonian(udot,u,p,t)\n α = u[1]\n lα = u[2]\n β = u[3]\n lβ = u[4]\n udot .=\n [2(lα-(1+cos(β))lβ)/(3-cos(2β)),\n -2sin(α) - sin(α+β),\n 2(-(1+cos(β))lα + 
(3+2cos(β))lβ)/(3-cos(2β)),\n -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)]\nend\n\n# Construct a ContiunousCallback\ncondition(u,t,integrator) = u[1]\naffect!(integrator) = nothing\ncb = ContinuousCallback(condition,affect!,nothing,\n save_positions = (true,false))\n\n# Construct Problem\npoincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2)\nsol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)\n\nfunction poincare_map(prob, u₀, p; callback=cb)\n _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan)\n sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9)\n scatter!(p, sol, vars=(3,4), markersize = 2)\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03))\nfor i in -0.01:0.00125:0.01\n poincare_map(poincare, i, p)\nend\nplot(p,ylims=(-0.01,0.03))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Hénon-Heiles System\n\nThe Hénon-Heiles potential occurs when non-linear motion of a star around a galactic center with the motion restricted to a plane.\n\n$$\n\\begin{align}\n\\frac{d^2x}{dt^2}&=-\\frac{\\partial V}{\\partial x}\\\\\n\\frac{d^2y}{dt^2}&=-\\frac{\\partial V}{\\partial y}\n\\end{align}\n$$\n\nwhere\n\n$$V(x,y)={\\frac {1}{2}}(x^{2}+y^{2})+\\lambda \\left(x^{2}y-{\\frac {y^{3}}{3}}\\right).$$\n\nWe pick $\\lambda=1$ in this case, so\n\n$$V(x,y) = \\frac{1}{2}(x^2+y^2+2x^2y-\\frac{2}{3}y^3).$$\n\nThen the total energy of the system can be expressed by\n\n$$E = T+V = V(x,y)+\\frac{1}{2}(\\dot{x}^2+\\dot{y}^2).$$\n\nThe total energy should conserve as this system evolves." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using OrdinaryDiffEq, Plots\n\n#Setup\ninitial = [0.,0.1,0.5,0]\ntspan = (0,100.)\n\n#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will\n#the total energy of the system.\nV(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3)\nE(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2);\n\n#Define the function\nfunction Hénon_Heiles(du,u,p,t)\n x = u[1]\n y = u[2]\n dx = u[3]\n dy = u[4]\n du[1] = dx\n du[2] = dy\n du[3] = -x - 2x*y\n du[4] = y^2 - y -x^2\nend\n\n#Pass to solvers\nprob = ODEProblem(Hénon_Heiles, initial, tspan)\nsol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Plot the orbit\nplot(sol, vars=(1,2), title = \"The orbit of the Hénon-Heiles system\", xaxis = \"x\", yaxis = \"y\", leg=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Optional Sanity check - what do you think this returns and why?\n@show sol.retcode\n\n#Plot -\nplot(sol, vars=(1,3), title = \"Phase space for the Hénon-Heiles system\", xaxis = \"Position\", yaxis = \"Velocity\")\nplot!(sol, vars=(2,4), leg = false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector\n#pass it to the plotter a bit more conveniently\nenergy = map(x->E(x...), sol.u)\n\n#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.\n@show ΔE = energy[1]-energy[end]\n\n#Plot\nplot(sol.t, energy, title = \"Change in Energy over Time\", xaxis = \"Time in iterations\", yaxis = \"Change in Energy\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - 
"### Symplectic Integration\n\nTo prevent energy drift, we can instead use a symplectic integrator. We can directly define and solve the `SecondOrderODEProblem`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function HH_acceleration!(dv,v,u,p,t)\n x,y = u\n dx,dy = dv\n dv[1] = -x - 2x*y\n dv[2] = y^2 - y -x^2\nend\ninitial_positions = [0.0,0.1]\ninitial_velocities = [0.5,0.0]\nprob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan)\nsol2 = solve(prob, KahanLi8(), dt=1/10);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we get the same results:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Plot the orbit\nplot(sol2, vars=(3,4), title = \"The orbit of the Hénon-Heiles system\", xaxis = \"x\", yaxis = \"y\", leg=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol2, vars=(3,1), title = \"Phase space for the Hénon-Heiles system\", xaxis = \"Position\", yaxis = \"Velocity\")\nplot!(sol2, vars=(4,2), leg = false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "but now the energy change is essentially zero:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u)\n#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great.\n@show ΔE = energy[1]-energy[end]\n\n#Plot\nplot(sol2.t, energy, title = \"Change in Energy over Time\", xaxis = \"Time in iterations\", yaxis = \"Change in Energy\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "It's so close to zero it breaks GR! And let's try to use a Runge-Kutta-Nyström solver to solve this. 
Note that Runge-Kutta-Nyström isn't symplectic." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol3 = solve(prob, DPRKN6());\nenergy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u)\n@show ΔE = energy[1]-energy[end]\ngr()\nplot(sol3.t, energy, title = \"Change in Energy over Time\", xaxis = \"Time in iterations\", yaxis = \"Change in Energy\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note that we are using the `DPRKN6` sovler at `reltol=1e-3` (the default), yet it has a smaller energy variation than `Vern9` at `abs_tol=1e-16, rel_tol=1e-16`. Therefore, using specialized solvers to solve its particular problem is very efficient." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/02-conditional_dosing.ipynb b/notebook/models/02-conditional_dosing.ipynb deleted file mode 100644 index b05f6b57..00000000 --- a/notebook/models/02-conditional_dosing.ipynb +++ /dev/null @@ -1,140 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Conditional Dosing Pharmacometric Example\n### Chris Rackauckas\n\nIn this example we will show how to model a conditional dosing using the `DiscreteCallbacks`. The problem is as follows. The patient has a drug `A(t)` in their system. The concentration of the drug is given as `C(t)=A(t)/V` for some volume constant `V`. At `t=4`, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below `4`, then they will receive a new dose.\n\nFor our model, we will use the simple decay equation. 
We will write this in the in-place form to make it easy to extend to more complicated examples:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nfunction f(du,u,p,t)\n du[1] = -u[1]\nend\nu0 = [10.0]\nconst V = 1\nprob = ODEProblem(f,u0,(0.0,10.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's see what the solution looks like without any events." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We see that at time `t=4`, the patient should receive a dose. Let's code up that event. We need to check at `t=4` if the concentration `u[1]/4` is `<4`, and if so, add `10` to `u[1]`. We do this with the following:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "condition(u,t,integrator) = t==4 && u[1]/V<4\naffect!(integrator) = integrator.u[1] += 10\ncb = DiscreteCallback(condition,affect!)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now we will give this callback to the solver, and tell it to stop at `t=4` so that way the condition can be checked:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's show that it actually added 10 instead of setting the value to 10. 
We could have set the value using `affect!(integrator) = integrator.u[1] = 10`" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(sol(4.00000))\nprintln(sol(4.000000000001))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's model a patient whose decay rate for the drug is lower:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function f(du,u,p,t)\n du[1] = -u[1]/6\nend\nu0 = [10.0]\nconst V = 1\nprob = ODEProblem(f,u0,(0.0,10.0))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5())\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Under the same criteria, with the same event, this patient will not receive a second dose:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)\nusing Plots; gr()\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/03-diffeqbio_I_introduction.ipynb b/notebook/models/03-diffeqbio_I_introduction.ipynb deleted file mode 100644 index 56e79229..00000000 --- a/notebook/models/03-diffeqbio_I_introduction.ipynb +++ /dev/null @@ -1,243 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# DiffEqBiological Tutorial I: Introduction\n### Samuel Isaacson\n\nDiffEqBiological.jl is a domain specific language (DSL) for writing chemical\nreaction networks in Julia. 
The generated chemical reaction network model can\nthen be translated into a variety of mathematical models which can be solved\nusing components of the broader\n[DifferentialEquations.jl](http://juliadiffeq.org/) ecosystem.\n\nIn this tutorial we'll provide an introduction to using DiffEqBiological to\nspecify chemical reaction networks, and then to solve ODE, jump, tau-leaping and\nSDE models generated from them. Let's start by using the DiffEqBiological\n`reaction_network` macro to specify a simply chemical reaction network; the\nwell-known Repressilator. \n\nWe first import the basic packages we'll need, and use Plots.jl for making\nfigures:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# If not already installed, first hit \"]\" within a Julia REPL. Then type:\n# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify \n\nusing DifferentialEquations, DiffEqBiological, Plots, Latexify\npyplot(fmt=:svg);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We now construct the reaction network. The basic types of arrows and predefined\nrate laws one can use are discussed in detail within the DiffEqBiological\n[Chemical Reaction Models\ndocumentation](http://docs.juliadiffeq.org/latest/models/biological.html). 
Here\nwe use a mix of first order, zero order and repressive Hill function rate laws.\nNote, $\\varnothing$ corresponds to the empty state, and is used for zeroth order\nproduction and first order degradation reactions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "repressilator = @reaction_network begin\n hillr(P₃,α,K,n), ∅ --> m₁\n hillr(P₁,α,K,n), ∅ --> m₂\n hillr(P₂,α,K,n), ∅ --> m₃\n (δ,γ), m₁ ↔ ∅\n (δ,γ), m₂ ↔ ∅\n (δ,γ), m₃ ↔ ∅\n β, m₁ --> m₁ + P₁\n β, m₂ --> m₂ + P₂\n β, m₃ --> m₃ + P₃\n μ, P₁ --> ∅\n μ, P₂ --> ∅\n μ, P₃ --> ∅\nend α K n δ γ β μ;" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can use Latexify to look at the corresponding reactions and understand the\ngenerated rate laws for each reaction" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "latexify(repressilator; env=:chemical)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can also use Latexify to look at the corresponding ODE model for the chemical\nsystem" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "latexify(repressilator, cdot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To solve the ODEs we need to specify the values of the parameters in the model,\nthe initial condition, and the time interval to solve the model on. To do this\nit helps to know the orderings of the parameters and the species. Parameters are\nordered in the same order they appear after the `end` statement in the\n`@reaction_network` macro. Species are ordered in the order they first appear\nwithin the `@reaction_network` macro. 
We can see these orderings using the\n`speciesmap` and `paramsmap` functions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "speciesmap(repressilator)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "paramsmap(repressilator)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Solving the ODEs:\nKnowing these orderings, we can create parameter and initial condition vectors,\nand setup the `ODEProblem` we want to solve:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# parameters [α,K,n,δ,γ,β,μ]\np = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60)\n\n# initial condition [m₁,m₂,m₃,P₁,P₂,P₃]\nu₀ = [0.,0.,0.,20.,0.,0.]\n\n# time interval to solve on\ntspan = (0., 10000.)\n\n# create the ODEProblem we want to solve\noprob = ODEProblem(repressilator, u₀, tspan, p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "At this point we are all set to solve the ODEs. We can now use any ODE solver\nfrom within the DiffEq package. We'll just use the default DifferentialEquations\nsolver for now, and then plot the solutions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(oprob, saveat=10.)\nplot(sol, fmt=:svg)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We see the well-known oscillatory behavior of the repressilator! For more on\nchoices of ODE solvers, see the JuliaDiffEq\n[documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html).\n\n---\n\n## Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kinetics\nLet's now look at a stochastic chemical kinetics model of the repressilator,\nmodeling it with jump processes. 
Here we will construct a DiffEqJump\n`JumpProblem` that uses Gillespie's `Direct` method, and then solve it to\ngenerate one realization of the jump process:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# first we redefine the initial condition to be integer valued\nu₀ = [0,0,0,20,0,0]\n\n# next we create a discrete problem to encode that our species are integer valued:\ndprob = DiscreteProblem(repressilator, u₀, tspan, p)\n\n# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver:\njprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false))\n\n# now let's solve and plot the jump process:\nsol = solve(jprob, SSAStepper(), saveat=10.)\nplot(sol, fmt=:svg)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here we see that oscillations remain, but become much noiser. Note, in\nconstructing the `JumpProblem` we could have used any of the SSAs that are part\nof DiffEqJump instead of the `Direct` method, see the list of SSAs (i.e.\nconstant rate jump aggregators) in the\n[documentation](http://docs.juliadiffeq.org/latest/types/jump_types.html#Constant-Rate-Jump-Aggregators-1).\n\n---\n## $\\tau$-leaping Methods:\nWhile SSAs generate exact realizations for stochastic chemical kinetics jump\nprocess models, [$\\tau$-leaping](https://en.wikipedia.org/wiki/Tau-leaping)\nmethods offer a performant alternative by discretizing in time the underlying\ntime-change representation of the stochastic process. The DiffEqJump package has\nlimited support for $\\tau$-leaping methods in the form of the basic Euler's\nmethod type approximation proposed by Gillespie. 
We can simulate a $\\tau$-leap\napproximation to the repressilator by using the `RegularJump` representation of\nthe network to construct a `JumpProblem`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rjs = regularjumps(repressilator)\nlprob = JumpProblem(dprob, Direct(), rjs)\nlsol = solve(lprob, SimpleTauLeaping(), dt=.1)\nplot(lsol, plotdensity=1000, fmt=:svg)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "---\n## Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models:\nAt an intermediary physical scale between macroscopic ODE models and microscopic\nstochastic chemical kinetic models lies the CLE, a SDE version of the model. The\nSDEs add to each ODE above a noise term. As the repressilator has species that\nget very close to zero in size, it is not a good candidate to model with the CLE\n(where solutions can then go negative and become unphysical). Let's create a\nsimpler reaction network for a birth-death process that will stay non-negative:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bdp = @reaction_network begin\n c₁, X --> 2X\n c₂, X --> 0\n c₃, 0 --> X\nend c₁ c₂ c₃\np = (1.0,2.0,50.)\nu₀ = [5.]\ntspan = (0.,4.);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The corresponding Chemical Langevin Equation SDE is then" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "latexify(bdp, noise=true, cdot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "where each $W_i(t)$ denotes an independent Brownian Motion. 
We can solve the CLE\nSDE model by creating an `SDEProblem` and solving it similar to what we did for\nODEs above:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# SDEProblem for CLE\nsprob = SDEProblem(bdp, u₀, tspan, p)\n\n# solve and plot, tstops is used to specify enough points \n# that the plot looks well-resolved\nsol = solve(sprob, tstops=range(0., step=4e-3, length=1001))\nplot(sol, fmt=:svg)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We again have complete freedom to select any of the\nStochasticDifferentialEquations.jl SDE solvers, see the\n[documentation](http://docs.juliadiffeq.org/latest/solvers/sde_solve.html).\n\n---\n## What information can be queried from the reaction_network:\nThe generated `reaction_network` contains a lot of basic information. For example\n- `f=oderhsfun(repressilator)` is a function `f(du,u,p,t)` that given the current\n state vector `u` and time `t` fills `du` with the time derivatives of `u`\n (i.e. the right hand side of the ODEs).\n- `jac=jacfun(repressilator)` is a function `jac(J,u,p,t)` that evaluates and\n returns the Jacobian of the ODEs in `J`. A corresponding Jacobian matrix of\n expressions can be accessed using the `jacobianexprs` function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "latexify(jacobianexprs(repressilator), cdot=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "- `pjac = paramjacfun(repressilator)` is a function `pjac(pJ,u,p,t)` that\n evaluates and returns the Jacobian, `pJ`, of the ODEs *with respect to the\n parameters*. 
This allows `reaction_network`s to be used in the\n DifferentialEquations.jl local sensitivity analysis package\n [DiffEqSensitivity](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html).\n\n\nBy default, generated `ODEProblems` will be passed the corresponding Jacobian\nfunction, which will then be used within implicit ODE/SDE methods. \n\nThe [DiffEqBiological API\ndocumentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides\na thorough description of the many query functions that are provided to access\nnetwork properties and generated functions. In DiffEqBiological Tutorial II\nwe'll explore the API.\n\n---\n## Getting Help\nHave a question related to DiffEqBiological or this tutorial? Feel free to ask\nin the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\nIf you think you've found a bug in DiffEqBiological, or would like to\nrequest/discuss new functionality, feel free to open an issue on\n[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\nthere is no related issue already open). If you've found a bug in this tutorial,\nor have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\nsite](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). 
Or, submit a pull\nrequest to DiffEqTutorials updating the tutorial!\n\n---" - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.2.0" - }, - "kernelspec": { - "name": "julia-1.2", - "display_name": "Julia 1.2.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/04-diffeqbio_II_networkproperties.ipynb b/notebook/models/04-diffeqbio_II_networkproperties.ipynb deleted file mode 100644 index f201d836..00000000 --- a/notebook/models/04-diffeqbio_II_networkproperties.ipynb +++ /dev/null @@ -1,6096 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# DiffEqBiological Tutorial II: Network Properties API\n", - "### Samuel Isaacson\n", - "\n", - "The [DiffEqBiological\n", - "API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides a\n", - "collection of functions for easily accessing network properties, and for\n", - "incrementally building and extending a network. 
In this tutorial we'll go\n", - "through the API, and then illustrate how to programmatically construct a\n", - "network.\n", - "\n", - "We'll illustrate the API using a toggle-switch like network that contains a\n", - "variety of different reaction types:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "using DifferentialEquations, DiffEqBiological, Latexify, Plots\n", - "fmt = :svg\n", - "pyplot(fmt=fmt)\n", - "rn = @reaction_network begin\n", - " hillr(D₂,α,K,n), ∅ --> m₁\n", - " hillr(D₁,α,K,n), ∅ --> m₂\n", - " (δ,γ), m₁ ↔ ∅\n", - " (δ,γ), m₂ ↔ ∅\n", - " β, m₁ --> m₁ + P₁\n", - " β, m₂ --> m₂ + P₂\n", - " μ, P₁ --> ∅\n", - " μ, P₂ --> ∅\n", - " (k₊,k₋), 2P₁ ↔ D₁ \n", - " (k₊,k₋), 2P₂ ↔ D₂\n", - " (k₊,k₋), P₁+P₂ ↔ T\n", - "end α K n δ γ β μ k₊ k₋;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This corresponds to the chemical reaction network given by" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{align}\n", - "\\require{mhchem}\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\\\\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\\\\n", - "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{2} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", - "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", - "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ 2 \\cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\\\\n", - "\\ce{ 2 \\cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\\\\n", - "\\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T}\n", - "\\end{align}\n" - ], - "text/plain": [ - "L\"\\begin{align}\n", - "\\require{mhchem}\n", - "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\\\\n", - "\\ce{ \\varnothing 
&->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\\\\n", - "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{2} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", - "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", - "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", - "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", - "\\ce{ 2 \\cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\\\\n", - "\\ce{ 2 \\cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\\\\n", - "\\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T}\n", - "\\end{align}\n", - "\"" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(rn; env=:chemical)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Network Properties\n", - "[Basic\n", - "properties](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Basic-properties-1)\n", - "of the generated network include the `speciesmap` and `paramsmap` functions we\n", - "examined in the last tutorial, along with the corresponding `species` and\n", - "`params` functions:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "7-element Array{Symbol,1}:\n", - " :m₁\n", - " :m₂\n", - " :P₁\n", - " :P₂\n", - " :D₁\n", - " :D₂\n", - " :T " - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "species(rn)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "9-element Array{Symbol,1}:\n", - " :α \n", - " :K \n", - " :n \n", - " :δ \n", - " :γ \n", - " :β \n", - " :μ \n", - " :k₊\n", - " :k₋" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "params(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The numbers of species, parameters and reactions 
can be accessed using\n", - "`numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`.\n", - "\n", - "A number of functions are available to access [properties of\n", - "reactions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Properties-1)\n", - "within the generated network, including `substrates`, `products`, `dependents`,\n", - "`ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`,\n", - "`productsymstoich`, and `netstoich`. Each of these functions takes two\n", - "arguments, the reaction network `rn` and the index of the reaction to query\n", - "information about. For example, to find the substrate symbols and their\n", - "corresponding stoichiometries for the 11th reaction, `2P₁ --> D₁`, we would use" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "1-element Array{DiffEqBiological.ReactantStruct,1}:\n", - " DiffEqBiological.ReactantStruct(:P₁, 2)" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "substratesymstoich(rn, 11)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Broadcasting works on all these functions, allowing the construction of a vector\n", - "holding the queried information across all reactions, i.e." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{DiffEqBiological.ReactantStruct,1},1}:\n", - " [] \n", - " [] \n", - " [ReactantStruct(:m₁, 1)] \n", - " [] \n", - " [ReactantStruct(:m₂, 1)] \n", - " [] \n", - " [ReactantStruct(:m₁, 1)] \n", - " [ReactantStruct(:m₂, 1)] \n", - " [ReactantStruct(:P₁, 1)] \n", - " [ReactantStruct(:P₂, 1)] \n", - " [ReactantStruct(:P₁, 2)] \n", - " [ReactantStruct(:D₁, 1)] \n", - " [ReactantStruct(:P₂, 2)] \n", - " [ReactantStruct(:D₂, 1)] \n", - " [ReactantStruct(:P₁, 1), ReactantStruct(:P₂, 1)]\n", - " [ReactantStruct(:T, 1)] " - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "substratesymstoich.(rn, 1:numreactions(rn))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To see the net stoichiometries for all reactions we would use" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{Pair{Int64,Int64},1},1}:\n", - " [1=>1] \n", - " [2=>1] \n", - " [1=>-1] \n", - " [1=>1] \n", - " [2=>-1] \n", - " [2=>1] \n", - " [3=>1] \n", - " [4=>1] \n", - " [3=>-1] \n", - " [4=>-1] \n", - " [3=>-2, 5=>1] \n", - " [3=>2, 5=>-1] \n", - " [4=>-2, 6=>1] \n", - " [4=>2, 6=>-1] \n", - " [3=>-1, 4=>-1, 7=>1]\n", - " [3=>1, 4=>1, 7=>-1] " - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "netstoich.(rn, 1:numreactions(rn))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here the first integer in each pair corresponds to the index of the species\n", - "(with symbol `species(rn)[index]`). The second integer corresponds to the net\n", - "stoichiometric coefficient of the species within the reaction. `substratestoich`\n", - "and `productstoich` are defined similarly. 
\n", - "\n", - "Several functions are also provided that calculate different types of\n", - "[dependency\n", - "graphs](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Dependency-Graphs-1).\n", - "These include `rxtospecies_depgraph`, which provides a mapping from reaction\n", - "index to the indices of species whose population changes when the reaction\n", - "occurs:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{Int64,1},1}:\n", - " [1] \n", - " [2] \n", - " [1] \n", - " [1] \n", - " [2] \n", - " [2] \n", - " [3] \n", - " [4] \n", - " [3] \n", - " [4] \n", - " [3, 5] \n", - " [3, 5] \n", - " [4, 6] \n", - " [4, 6] \n", - " [3, 4, 7]\n", - " [3, 4, 7]" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rxtospecies_depgraph(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here the last row indicates that the species with indices `[3,4,7]` will change\n", - "values when the reaction `T --> P₁ + P₂` occurs. To confirm these are the\n", - "correct species we can look at" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "3-element Array{Symbol,1}:\n", - " :P₁\n", - " :P₂\n", - " :T " - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "species(rn)[[3,4,7]]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `speciestorx_depgraph` similarly provides a mapping from species to reactions \n", - "for which their *rate laws* depend on that species. These correspond to all reactions\n", - "for which the given species is in the `dependent` set of the reaction. 
We can verify this\n", - "for the first species, `m₁`:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "2-element Array{Int64,1}:\n", - " 3\n", - " 7" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "speciestorx_depgraph(rn)[1]" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "2-element Array{Int64,1}:\n", - " 3\n", - " 7" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, `rxtorx_depgraph` provides a mapping that shows when a given reaction\n", - "occurs, which other reactions have rate laws that involve species whose value\n", - "would have changed:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Array{Array{Int64,1},1}:\n", - " [1, 3, 7] \n", - " [2, 5, 8] \n", - " [3, 7] \n", - " [3, 4, 7] \n", - " [5, 8] \n", - " [5, 6, 8] \n", - " [7, 9, 11, 15] \n", - " [8, 10, 13, 15] \n", - " [9, 11, 15] \n", - " [10, 13, 15] \n", - " [2, 9, 11, 12, 15] \n", - " [2, 9, 11, 12, 15] \n", - " [1, 10, 13, 14, 15] \n", - " [1, 10, 13, 14, 15] \n", - " [9, 10, 11, 13, 15, 16]\n", - " [9, 10, 11, 13, 15, 16]" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rxtorx_depgraph(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Note on Using Network Property API Functions\n", - "Many basic network query and reaction property functions are simply accessors,\n", - "returning information that is already stored within the generated\n", - "`reaction_network`. 
For these functions, modifying the returned data structures\n", - "may lead to inconsistent internal state within the network. As such, they should\n", - "be used for accessing, but not modifying, network properties. The [API\n", - "documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html)\n", - "indicates which functions return newly allocated data structures and which\n", - "return data stored within the `reaction_network`.\n", - "\n", - "---\n", - "## Incremental Construction of Networks\n", - "The `@reaction_network` macro is monolithic, in that it not only constructs and\n", - "stores basic network properties such as the reaction stoichiometries, but also\n", - "generates **everything** needed to immediately solve ODE, SDE and jump models\n", - "using the network. This includes Jacobian functions, noise functions, and jump\n", - "functions for each reaction. While this allows for a compact interface to the\n", - "DifferentialEquations.jl solvers, it can also be computationally expensive for\n", - "large networks, where a user may only wish to solve one type of problem and/or\n", - "have fine-grained control over what is generated. In addition, some types of\n", - "reaction network structures are more amenable to being constructed\n", - "programmatically, as opposed to writing out all reactions by hand within one\n", - "macro. For these reasons DiffEqBiological provides two additional macros that\n", - "only *initially* setup basic reaction network properties, and which can be\n", - "extended through a programmatic interface: `@min_reaction_network` and\n", - "`@empty_reaction_network`. We now give an introduction to constructing these\n", - "more minimal network representations, and how they can be programmatically\n", - "extended. 
See also the relevant [API\n", - "section](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1).\n", - "\n", - "The `@min_reaction_network` macro works identically to the `@reaction_network`\n", - "macro, but the generated network will only be complete with respect to its\n", - "representation of chemical network properties (i.e. species, parameters and\n", - "reactions). No ODE, SDE or jump models are generated during the macro call. It\n", - "can subsequently be extended with the addition of new species, parameters or\n", - "reactions. The `@empty_reaction_network` allocates an empty network structure\n", - "that can also be extended using the programmatic interface. For example, consider\n", - "a partial version of the toggle-switch like network we defined above:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "rnmin = @min_reaction_network begin\n", - " (δ,γ), m₁ ↔ ∅\n", - " (δ,γ), m₂ ↔ ∅\n", - " β, m₁ --> m₁ + P₁\n", - " β, m₂ --> m₂ + P₂\n", - " μ, P₁ --> ∅\n", - " μ, P₂ --> ∅\n", - "end δ γ β μ;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we have left out the first two, and last three, reactions from the original\n", - "`reaction_network`. To expand the network until it is functionally equivalent to\n", - "the original model we add back in the missing species, parameters, and *finally*\n", - "the missing reactions. Note, it is required that species and parameters be\n", - "defined before any reactions using them are added. The necessary network\n", - "extension functions are given by `addspecies!`, `addparam!` and `addreaction!`,\n", - "and described in the\n", - "[API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). 
To complete `rnmin` we first add the relevant\n", - "species:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "addspecies!(rnmin, :D₁)\n", - "addspecies!(rnmin, :D₂)\n", - "addspecies!(rnmin, :T)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next we add the needed parameters" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "addparam!(rnmin, :α)\n", - "addparam!(rnmin, :K)\n", - "addparam!(rnmin, :n)\n", - "addparam!(rnmin, :k₊)\n", - "addparam!(rnmin, :k₋)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note, both `addspecies!` and `addparam!` also accept strings encoding the\n", - "variable names (which are then converted to `Symbol`s internally).\n", - "\n", - "We are now ready to add the missing reactions. The API provides two forms of the\n", - "`addreaction!` function, one takes expressions analogous to what one would write\n", - "in the macro:" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂))\n", - "addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂))\n", - "addreaction!(rnmin, :k₊, :(2P₁ --> D₁))\n", - "addreaction!(rnmin, :k₋, :(D₁ --> 2P₁))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The rate can be an expression or symbol as above, but can also just be a\n", - "numeric value. 
The second form of `addreaction!` takes tuples of\n", - "`Pair{Symbol,Int}` that encode the stoichiometric coefficients of substrates and\n", - "reactants:" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich)\n", - "addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,))\n", - "addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,))\n", - "addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check that `rn` and `rnmin` have the same set of species:" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "setdiff(species(rn), species(rnmin))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "the same set of params:" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "setdiff(params(rn), params(rnmin))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "and the final reaction has the same substrates, reactions, and rate expression:" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rxidx = numreactions(rn)\n", - "setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx))" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - 
{ - "data": { - "text/plain": [ - "0-element Array{Symbol,1}" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "setdiff(products(rn, rxidx), products(rnmin, rxidx))" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "true" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps\n", - "Once a network generated from `@min_reaction_network` or\n", - "`@empty_reaction_network` has had all the associated species, parameters and\n", - "reactions filled in, corresponding ODE, SDE or jump models can be constructed.\n", - "The relevant API functions are `addodes!`, `addsdes!` and `addjumps!`. One\n", - "benefit to contructing models with these functions is that they offer more\n", - "fine-grained control over what actually gets constructed. For example,\n", - "`addodes!` has the optional keyword argument, `build_jac`, which if set to\n", - "`false` will disable construction of symbolic Jacobians and functions for\n", - "evaluating Jacobians. For large networks this can give a significant speed-up in\n", - "the time required for constructing an ODE model. 
Each function and its\n", - "associated keyword arguments are described in the API section, [Functions to add\n", - "ODEs, SDEs or Jumps to a\n", - "Network](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1).\n", - "\n", - "Let's extend `rnmin` to include the needed functions for use in ODE\n", - "solvers:" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "addodes!(rnmin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The [Generated Functions for\n", - "Models](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Functions-for-Models-1)\n", - "section of the API shows what functions have been generated. For ODEs these\n", - "include `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)`\n", - "which evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. For\n", - "each generated function, the corresponding expressions from which it was\n", - "generated can be retrieved using accessors from the [Generated\n", - "Expressions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Expressions-1)\n", - "section of the API. The equations within `du` can be retrieved using the\n", - "`odeexprs(rnmin)` function. 
For example:" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "7-element Array{Union{Float64, Int64, Expr, Symbol},1}:\n", - " :((-(δ * m₁) + γ) + (α * K ^ n) / (K ^ n + D₂ ^ n)) \n", - " :((-(δ * m₂) + γ) + (α * K ^ n) / (K ^ n + D₁ ^ n)) \n", - " :(((((β * m₁ - μ * P₁) + -2 * (k₊ / 2) * P₁ ^ 2) + 2 * k₋ * D₁) - k₊ * P₁ * P₂) + k₋ * T)\n", - " :(((((β * m₂ - μ * P₂) + -2 * (k₊ / 2) * P₂ ^ 2) + 2 * k₋ * D₂) - k₊ * P₁ * P₂) + k₋ * T)\n", - " :((k₊ / 2) * P₁ ^ 2 - k₋ * D₁) \n", - " :((k₊ / 2) * P₂ ^ 2 - k₋ * D₂) \n", - " :(k₊ * P₁ * P₂ - k₋ * T) " - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "odeexprs(rnmin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Using Latexify we can see the ODEs themselves to compare with these expressions:" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{align}\n", - "\\frac{dm_1}{dt} =& - \\delta \\cdot m_1 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}} \\\\\n", - "\\frac{dm_2}{dt} =& - \\delta \\cdot m_2 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}} \\\\\n", - "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 -2 \\cdot \\frac{k_+}{2} \\cdot P_1^{2} + 2 \\cdot k_- \\cdot D_1 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 -2 \\cdot \\frac{k_+}{2} \\cdot P_2^{2} + 2 \\cdot k_- \\cdot D_2 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dD_1}{dt} =& \\frac{k_+}{2} \\cdot P_1^{2} - k_- \\cdot D_1 \\\\\n", - "\\frac{dD_2}{dt} =& \\frac{k_+}{2} \\cdot P_2^{2} - k_- \\cdot D_2 \\\\\n", - "\\frac{dT}{dt} =& k_+ \\cdot P_1 \\cdot P_2 - k_- \\cdot T \\\\\n", - "\\end{align}\n" - ], - "text/plain": [ - "L\"\\begin{align}\n", - "\\frac{dm_1}{dt} =& - \\delta \\cdot m_1 + \\gamma + 
\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}} \\\\\n", - "\\frac{dm_2}{dt} =& - \\delta \\cdot m_2 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}} \\\\\n", - "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 -2 \\cdot \\frac{k_+}{2} \\cdot P_1^{2} + 2 \\cdot k_- \\cdot D_1 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 -2 \\cdot \\frac{k_+}{2} \\cdot P_2^{2} + 2 \\cdot k_- \\cdot D_2 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", - "\\frac{dD_1}{dt} =& \\frac{k_+}{2} \\cdot P_1^{2} - k_- \\cdot D_1 \\\\\n", - "\\frac{dD_2}{dt} =& \\frac{k_+}{2} \\cdot P_2^{2} - k_- \\cdot D_2 \\\\\n", - "\\frac{dT}{dt} =& k_+ \\cdot P_1 \\cdot P_2 - k_- \\cdot T \\\\\n", - "\\end{align}\n", - "\"" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(rnmin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For ODEs two other functions are generated by `addodes!`. 
`jacfun(rnmin)` will\n", - "return the generated Jacobian evaluation function, `fjac(dJ,u,p,t)`, which given\n", - "the current solution `u` evaluates the Jacobian within `dJ`.\n", - "`jacobianexprs(rnmin)` gives the corresponding matrix of expressions, which can\n", - "be used with Latexify to see the Jacobian:" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "\\begin{equation}\n", - "\\left[\n", - "\\begin{array}{ccccccc}\n", - " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_2^{-1 + n}}{\\left( K^{n} + D_2^{n} \\right)^{2}} & 0 \\\\\n", - "0 & - \\delta & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_1^{-1 + n}}{\\left( K^{n} + D_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", - "\\beta & 0 & - \\mu - 2 \\cdot k_+ \\cdot P_1 - k_+ \\cdot P_2 & - k_+ \\cdot P_1 & 2 \\cdot k_- & 0 & k_{-} \\\\\n", - "0 & \\beta & - k_+ \\cdot P_2 & - \\mu - 2 \\cdot k_+ \\cdot P_2 - k_+ \\cdot P_1 & 0 & 2 \\cdot k_- & k_{-} \\\\\n", - "0 & 0 & k_+ \\cdot P_1 & 0 & - k_- & 0 & 0 \\\\\n", - "0 & 0 & 0 & k_+ \\cdot P_2 & 0 & - k_- & 0 \\\\\n", - "0 & 0 & k_+ \\cdot P_2 & k_+ \\cdot P_1 & 0 & 0 & - k_- \\\\\n", - "\\end{array}\n", - "\\right]\n", - "\\end{equation}\n" - ], - "text/plain": [ - "L\"\\begin{equation}\n", - "\\left[\n", - "\\begin{array}{ccccccc}\n", - " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_2^{-1 + n}}{\\left( K^{n} + D_2^{n} \\right)^{2}} & 0 \\\\\n", - "0 & - \\delta & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_1^{-1 + n}}{\\left( K^{n} + D_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", - "\\beta & 0 & - \\mu - 2 \\cdot k_+ \\cdot P_1 - k_+ \\cdot P_2 & - k_+ \\cdot P_1 & 2 \\cdot k_- & 0 & k_{-} \\\\\n", - "0 & \\beta & - k_+ \\cdot P_2 & - \\mu - 2 \\cdot k_+ \\cdot P_2 - k_+ \\cdot P_1 & 0 & 2 \\cdot k_- & k_{-} \\\\\n", - "0 & 0 & k_+ \\cdot P_1 & 0 & - k_- & 0 & 0 \\\\\n", - "0 & 0 & 0 & k_+ \\cdot P_2 & 0 & - 
k_- & 0 \\\\\n", - "0 & 0 & k_+ \\cdot P_2 & k_+ \\cdot P_1 & 0 & 0 & - k_- \\\\\n", - "\\end{array}\n", - "\\right]\n", - "\\end{equation}\n", - "\"" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "latexify(jacobianexprs(rnmin))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`addodes!` also generates a function that evaluates the Jacobian of the ODE\n", - "derivative functions with respect to the parameters. `paramjacfun(rnmin)` then\n", - "returns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which\n", - "given the current solution `u` evaluates the Jacobian matrix with respect to\n", - "parameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an\n", - "[`ODEFunction`](http://docs.juliadiffeq.org/latest/features/performance_overloads.html)\n", - "representation of the ODEs is available from `odefun(rnmin)`. \n", - "\n", - "`addsdes!` and `addjumps!` work similarly to complete the network for use in\n", - "StochasticDiffEq and DiffEqJump solvers. \n", - "\n", - "#### Note on Using Generated Function and Expression API Functions\n", - "The generated functions and expressions accessible through the API require first\n", - "calling the appropriate `addodes!`, `addsdes` or `addjumps` function. These are\n", - "responsible for actually constructing the underlying functions and expressions.\n", - "The API accessors simply return already constructed functions and expressions\n", - "that are stored within the `reaction_network` structure.\n", - "\n", - "---\n", - "## Example of Generating a Network Programmatically\n", - "For a user directly typing in a reaction network, it is generally easier to use\n", - "the `@min_reaction_network` or `@reaction_network` macros to fully specify\n", - "reactions. However, for large, structured networks it can be much easier to\n", - "generate the network programmatically. 
For very large networks, with tens of\n", - "thousands of reactions, the form of `addreaction!` that uses stoichiometric\n", - "coefficients should be preferred as it offers substantially better performance.\n", - "To put together everything we've seen, let's generate the network corresponding\n", - "to a 1D continuous time random walk, approximating the diffusion of molecules\n", - "within an interval.\n", - "\n", - "The basic \"reaction\" network we wish to study is \n", - "\n", - "$$\n", - "u_1 \\leftrightarrows u_2 \\leftrightarrows u_3 \\cdots \\leftrightarrows u_{N}\n", - "$$\n", - "\n", - "for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll\n", - "assume the rate molecules hop from their current site to any particular neighbor\n", - "is just $h^{-2}$. We can interpret this hopping process as a collection of\n", - "$2N-2$ \"reactions\", with the form $u_i \\to u_j$ for $j=i+1$ or $j=i-1$. We construct\n", - "the corresponding reaction network as follows. First we set values for the basic\n", - "parameters:" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.015625" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "N = 64\n", - "h = 1 / N" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "then we create an empty network, and add each species" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "rn = @empty_reaction_network\n", - "\n", - "for i = 1:N\n", - " addspecies!(rn, Symbol(:u, i))\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We next add one parameter `β`, which we will set equal to the hopping rate \n", - "of molecules, $h^{-2}$:" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [], - "source": [ - "addparam!(rn, :β)" - ] 
- }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, we add in the $2N-2$ possible hopping reactions:" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "for i = 1:N\n", - " (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,))\n", - " (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,))\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's first construct an ODE model for the network" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "addodes!(rn)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now need to specify the initial condition, parameter vector and time interval\n", - "to solve on. We start with 10000 molecules placed at the center of the domain,\n", - "and setup an `ODEProblem` to solve:" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[36mODEProblem\u001b[0m with uType \u001b[36mArray{Float64,1}\u001b[0m and tType \u001b[36mFloat64\u001b[0m. In-place: \u001b[36mtrue\u001b[0m\n", - "timespan: (0.0, 0.01)\n", - "u0: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 … 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]" - ] - }, - "execution_count": 32, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "u₀ = zeros(N)\n", - "u₀[div(N,2)] = 10000\n", - "p = [1/(h*h)]\n", - "tspan = (0.,.01)\n", - "oprob = ODEProblem(rn, u₀, tspan, p)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are now ready to solve the problem and plot the solution. 
Since we have\n", - "essentially generated a method of lines discretization of the diffusion equation\n", - "with a discontinuous initial condition, we'll use an A-L stable implicit ODE\n", - "solver, `Rodas5`, and plot the solution at a few times:" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sol = solve(oprob, Rodas5())\n", - "times = [0., .0001, .001, .01]\n", - "plt = plot()\n", - "for time in times\n", - " plot!(plt, 1:N, sol(time), fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", label=string(\"t = \", time), lw=3)\n", - "end\n", - "plot(plt, ylims=(0.,10000.))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we see the characteristic diffusion of molecules from the center of the\n", - "domain, resulting in a shortening and widening of the solution as $t$ increases.\n", - "\n", - "Let's now look at a stochastic chemical kinetics jump process version of the\n", - "model, where β gives the probability per time each molecule can hop from its\n", - "current lattice site to an individual neighboring site. We first add in the\n", - "jumps, disabling `regular_jumps` since they are not needed, and using the\n", - "`minimal_jumps` flag to construct a minimal representation of the needed jumps.\n", - "We then construct a `JumpProblem`, and use the Composition-Rejection Direct\n", - "method, `DirectCR`, to simulate the process of the molecules hopping about on\n", - "the lattice:" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "retcode: Default\n", - "Interpolation: Piecewise constant interpolation\n", - "t: 4-element Array{Float64,1}:\n", - " 0.0 \n", - " 0.0001\n", - " 0.001 \n", - " 0.01 \n", - "u: 4-element Array{Array{Int64,1},1}:\n", - " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", - " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", - " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", - " [0, 2, 4, 3, 4, 2, 12, 15, 24, 26 … 25, 16, 11, 7, 7, 4, 1, 1, 3, 1]" - ] - }, - "execution_count": 34, - "metadata": {}, - "output_type": "execute_result" - } - ], - 
"source": [ - "addjumps!(rn, build_regular_jumps=false, minimal_jumps=true)\n", - "\n", - "# make the initial condition integer valued \n", - "u₀ = zeros(Int, N)\n", - "u₀[div(N,2)] = 10000\n", - "\n", - "# setup and solve the problem\n", - "dprob = DiscreteProblem(rn, u₀, tspan, p)\n", - "jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))\n", - "jsol = solve(jprob, SSAStepper(), saveat=times)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now plot bar graphs showing the locations of the molecules at the same\n", - "set of times we examined the ODE solution. For comparison, we also plot the\n", - "corresponding ODE solutions (red lines) that we found:" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - 
" \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "\n" - ] - }, - "execution_count": 35, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "times = [0., .0001, .001, .01]\n", - "plts = []\n", - "for i = 1:4\n", - " b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", title=string(\"t = \", 
times[i]))\n", - " plot!(b,sol(times[i]))\n", - " push!(plts,b)\n", - "end\n", - "plot(plts...)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similar to the ODE solutions, we see that the molecules spread out and become\n", - "more and more well-mixed throughout the domain as $t$ increases. The simulation\n", - "results are noisy due to the finite numbers of molecules present in the\n", - "stochsatic simulation, but since the number of molecules is large they agree\n", - "well with the ODE solution at each time.\n", - "\n", - "---\n", - "## Getting Help\n", - "Have a question related to DiffEqBiological or this tutorial? Feel free to ask\n", - "in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\n", - "If you think you've found a bug in DiffEqBiological, or would like to\n", - "request/discuss new functionality, feel free to open an issue on\n", - "[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\n", - "there is no related issue already open). If you've found a bug in this tutorial,\n", - "or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\n", - "site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). 
Or, submit a pull\n", - "request to DiffEqTutorials updating the tutorial!\n", - "\n", - "---" - ] - } - ], - "metadata": { - "@webio": { - "lastCommId": null, - "lastKernelId": null - }, - "kernelspec": { - "display_name": "Julia 1.1.1", - "language": "julia", - "name": "julia-1.1" - }, - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebook/models/04b-diffeqbio_III_steadystates.ipynb b/notebook/models/04b-diffeqbio_III_steadystates.ipynb deleted file mode 100644 index b2e4abe1..00000000 --- a/notebook/models/04b-diffeqbio_III_steadystates.ipynb +++ /dev/null @@ -1,259 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# DiffEqBiological Tutorial III: Steady-States and Bifurcations\n### Torkel Loman and Samuel Isaacson\n\nSeveral types of steady state analysis can be performed for networks defined\nwith DiffEqBiological by utilizing homotopy continuation. This allows for\nfinding the steady states and bifurcations within a large class of systems. In\nthis tutorial we'll go through several examples of using this functionality.\n\nWe start by loading the necessary packages:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqBiological, Plots\ngr(); default(fmt = :png);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Steady states and stability of a biochemical reaction network.\nBistable switches are well known biological motifs, characterised by the\npresence of two different stable steady states." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bistable_switch = @reaction_network begin\n d, (X,Y) → ∅\n hillR(Y,v1,K1,n1), ∅ → X\n hillR(X,v2,K2,n2), ∅ → Y\nend d v1 K1 n1 v2 K2 n2\nd = 0.01;\nv1 = 1.5; K1 = 30; n1 = 3;\nv2 = 1.; K2 = 30; n2 = 3;\nbistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2];" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The steady states can be found using the `steady_states` function (which takes a reaction network and a set of parameter values as input). The stability of these steady states can be found using the `stability` function." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "ss = steady_states(bistable_switch, bistable_switch_p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "stability(ss,bistable_switch, bistable_switch_p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Since the equilibration methodology is based on homotopy continuation, it is not\nable to handle systems with non-integer exponents, or non polynomial reaction\nrates. 
Neither of the following two systems will work.\n\nThis system contains a non-integer exponent:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rn1 = @reaction_network begin\n p, ∅ → X\n hill(X,v,K,n), X → ∅\nend p v K n\np1 = [1.,2.5,1.5,1.5]\nsteady_states(rn1,p1)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This system contains a logarithmic reaction rate:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rn2 = @reaction_network begin\n p, ∅ → X\n log(X), X → ∅\nend p\np2 = [1.]\nsteady_states(rn2,p2)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Bifurcation diagrams for biochemical reaction networks\nBifurcation diagrams illustrate how the steady states of a system depend on one\nor more parameters. They can be computed with the `bifurcations` function. It\ntakes the same arguments as `steady_states`, with the addition of the parameter\none wants to vary, and an interval over which to vary it:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.))\nplot(bif,ylabel=\"[X]\",label=\"\")\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The values for the second variable in the system can also be displayed, by\ngiving that as an additional input to `plot` (it is the second argument, directly\nafter the bifurcation diagram object):" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(bif,2,ylabel=\"[Y]\")\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The `plot` function also accepts all 
other arguments which the Plots.jl `plot` function accepts." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.))\nplot(bif,linewidth=1.,title=\"A bifurcation diagram\",ylabel=\"Steady State concentration\")\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Certain parameters, like `n1`, cannot be sensibly varied over a continuous\ninterval. Instead, a discrete bifurcation diagram can be calculated with the\n`bifurcation_grid` function. Instead of an interval, the last argument is a\nrange of numbers:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.)\nplot(bif)\nscatter!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Bifurcation diagrams over two dimensions\nIn addition to the bifurcation diagrams illustrated above, where only a single\nvariable is varied, it is also possible to investigate the steady state\nproperties of s system as two different parameters are varied. Due to the nature\nof the underlying bifurcation algorithm it is not possible to continuously vary\nboth parameters. Instead, a set of discrete values are selected for the first\nparameter, and a continuous interval for the second. Next, for each discrete\nvalue of the first parameter, a normal bifurcation diagram is created over the\ninterval given for the second parameter." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.))\nplot(bif)\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In the single variable case we could use a `bifurcation_grid` to investigate the\nbehavior of a parameter which could only attain discrete values. In the same\nway, if we are interested in two parameters, both of which require integer\nvalues, we can use `bifrucation_grid_2d`. In our case, this is required if we\nwant to vary both the parameters `n1` and `n2`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.)\nplot(bif)\nscatter!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### The Brusselator\nThe Brusselator is a well know reaction network, which may or may not oscillate,\ndepending on parameter values." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "brusselator = @reaction_network begin\n A, ∅ → X\n 1, 2X + Y → 3X\n B, X → Y\n 1, X → ∅\nend A B;\nA = 0.5; B = 4.;\nbrusselator_p = [A, B];" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The system has only one steady state, for $(X,Y)=(A,B/A)$ This fixed point\nbecomes unstable when $B > 1+A^2$, leading to oscillations. 
Bifurcation diagrams\ncan be used to determine the system's stability, and hence look for where oscillations might appear in the Brusselator:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5))\nplot(bif,2)\nplot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = [\"Stable Real\" \"Stable Complex\" \"Unstable Complex\" \"Unstable Real\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here red and yellow colors label unstable steady-states, while blue and cyan\nlabel stable steady-states. (In addition, yellow and cyan correspond to points\nwhere at least one eigenvalue of the Jacobian is imaginary, while red and blue\ncorrespond to points with real-valued eigenvalues.)\n\nGiven `A=0.5`, the point at which the system should become unstable is `B=1.25`. We can confirm this in the bifurcation diagram.\n\nWe can also investigate the behavior when we vary both parameters of the system:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0))\nplot(bif)\nplot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = [\"Stable Real\" \"Stable Complex\" \"Unstable Complex\" \"Unstable Real\"])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "---\n## Getting Help\nHave a question related to DiffEqBiological or this tutorial? Feel free to ask\nin the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\nIf you think you've found a bug in DiffEqBiological, or would like to\nrequest/discuss new functionality, feel free to open an issue on\n[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\nthere is no related issue already open). 
If you've found a bug in this tutorial,\nor have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\nsite](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull\nrequest to DiffEqTutorials updating the tutorial!\n\n---" - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.2.0" - }, - "kernelspec": { - "name": "julia-1.2", - "display_name": "Julia 1.2.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/05-kepler_problem.ipynb b/notebook/models/05-kepler_problem.ipynb deleted file mode 100644 index 39531561..00000000 --- a/notebook/models/05-kepler_problem.ipynb +++ /dev/null @@ -1,179 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Kepler Problem\n### Yingbo Ma, Chris Rackauckas\n\nThe Hamiltonian $\\mathcal {H}$ and the angular momentum $L$ for the Kepler problem are\n\n$$\\mathcal {H} = \\frac{1}{2}(\\dot{q}^2_1+\\dot{q}^2_2)-\\frac{1}{\\sqrt{q^2_1+q^2_2}},\\quad\nL = q_1\\dot{q_2} - \\dot{q_1}q_2$$\n\nAlso, we know that\n\n$${\\displaystyle {\\frac {\\mathrm {d} {\\boldsymbol {p}}}{\\mathrm {d} t}}=-{\\frac {\\partial {\\mathcal {H}}}{\\partial {\\boldsymbol {q}}}}\\quad ,\\quad {\\frac {\\mathrm {d} {\\boldsymbol {q}}}{\\mathrm {d} t}}=+{\\frac {\\partial {\\mathcal {H}}}{\\partial {\\boldsymbol {p}}}}}$$" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr()\nH(q,p) = norm(p)^2/2 - inv(norm(q))\nL(q,p) = q[1]*p[2] - p[1]*q[2]\n\npdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q)\nqdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p)\n\ninitial_position = [.4, 0]\ninitial_velocity = [0., 2.]\ninitial_cond = (initial_position, initial_velocity)\ninitial_first_integrals = (H(initial_cond...), L(initial_cond...))\ntspan 
= (0,20.)\nprob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan)\nsol = solve(prob, KahanLi6(), dt=1//10);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's plot the orbit and check the energy and angular momentum variation. We know that energy and angular momentum should be constant, and they are also called first integrals." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot_orbit(sol) = plot(sol,vars=(3,4), lab=\"Orbit\", title=\"Kepler Problem Solution\")\n\nfunction plot_first_integrals(sol, H, L)\n plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab=\"Energy variation\", title=\"First Integrals\")\n plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab=\"Angular momentum variation\")\nend\nanalysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "analysis_plot(sol, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's try to use a Runge-Kutta-Nyström solver to solve this problem and check the first integrals' variation." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol2 = solve(prob, DPRKN6()) # dt is not necessary, because unlike symplectic\n # integrators DPRKN6 is adaptive\n@show sol2.u |> length\nanalysis_plot(sol2, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's then try to solve the same problem by the `ERKN4` solver, which is specialized for sinusoid-like periodic function" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic\n # integrators ERKN4 is adaptive\n@show sol3.u |> length\nanalysis_plot(sol3, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can see that `ERKN4` does a bad job for this problem, because this problem is not sinusoid-like.\n\nOne advantage of using `DynamicalODEProblem` is that it can implicitly convert the second order ODE problem to a *normal* system of first order ODEs, which is solvable for other ODE solvers. Let's use the `Tsit5` solver for the next example." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol4 = solve(prob, Tsit5())\n@show sol4.u |> length\nanalysis_plot(sol4, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Note\n\nThere is drifting for all the solutions, and high order methods are drifting less because they are more accurate.\n\n### Conclusion\n\n---\n\nSymplectic integrator does not conserve the energy completely at all time, but the energy can come back. In order to make sure that the energy fluctuation comes back eventually, symplectic integrator has to have a fixed time step. 
Despite the energy variation, symplectic integrator conserves the angular momentum perfectly.\n\nBoth Runge-Kutta-Nyström and Runge-Kutta integrator do not conserve energy nor the angular momentum, and the first integrals do not tend to come back. An advantage Runge-Kutta-Nyström integrator over symplectic integrator is that RKN integrator can have adaptivity. An advantage Runge-Kutta-Nyström integrator over Runge-Kutta integrator is that RKN integrator has less function evaluation per step. The `ERKN4` solver works best for sinusoid-like solutions.\n\n## Manifold Projection\n\nIn this example, we know that energy and angular momentum should be conserved. We can achieve this through mainfold projection. As the name implies, it is a procedure to project the ODE solution to a manifold. Let's start with a base case, where mainfold projection isn't being used." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqCallbacks\n\nplot_orbit2(sol) = plot(sol,vars=(1,2), lab=\"Orbit\", title=\"Kepler Problem Solution\")\n\nfunction plot_first_integrals2(sol, H, L)\n plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab=\"Energy variation\", title=\"First Integrals\")\n plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab=\"Angular momentum variation\")\nend\n\nanalysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L))\n\nfunction hamiltonian(du,u,params,t)\n q, p = u[1:2], u[3:4]\n qdot(@view(du[1:2]), p, q, params, t)\n pdot(@view(du[3:4]), p, q, params, t)\nend\n\nprob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan)\nsol_ = solve(prob2, RK4(), dt=1//5, adaptive=false)\nanalysis_plot2(sol_, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is a significant fluctuation in the first integrals, when there is no mainfold projection." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function first_integrals_manifold(residual,u)\n residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])\n residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4])\nend\n\ncb = ManifoldProjection(first_integrals_manifold)\nsol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb)\nanalysis_plot2(sol5, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can see that thanks to the manifold projection, the first integrals' variation is very small, although we are using `RK4` which is not symplectic. But wait, what if we only project to the energy conservation manifold?" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function energy_manifold(residual,u)\n residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4])\n residual[3:4] .= 0\nend\nenergy_cb = ManifoldProjection(energy_manifold)\nsol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb)\nanalysis_plot2(sol6, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "There is almost no energy variation but angular momentum varies quite bit. How about only project to the angular momentum conservation manifold?" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function angular_manifold(residual,u)\n residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4])\n residual[3:4] .= 0\nend\nangular_cb = ManifoldProjection(angular_manifold)\nsol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb)\nanalysis_plot2(sol7, H, L)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Again, we see what we expect." 
- ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/06-pendulum_bayesian_inference.ipynb b/notebook/models/06-pendulum_bayesian_inference.ipynb deleted file mode 100644 index 60f5fc3c..00000000 --- a/notebook/models/06-pendulum_bayesian_inference.ipynb +++ /dev/null @@ -1,170 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Bayesian Inference on a Pendulum using Turing.jl\n### Vaibhav Dixit\n\n### Set up simple pendulum problem" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqBayes, OrdinaryDiffEq, RecursiveArrayTools, Distributions, Plots, StatsPlots" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's define our simple pendulum problem. 
Here our pendulum has a drag term `ω`\nand a length `L`.\n\n![pendulum](https://user-images.githubusercontent.com/1814174/59942945-059c1680-942f-11e9-991c-2025e6e4ccd3.jpg)\n\nWe get first order equations by defining the first term as the velocity and the\nsecond term as the position, getting:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function pendulum(du,u,p,t)\n ω,L = p\n x,y = u\n du[1] = y\n du[2] = - ω*y -(9.8/L)*sin(x)\nend\n\nu0 = [1.0,0.1]\ntspan = (0.0,10.0)\nprob1 = ODEProblem(pendulum,u0,tspan,[1.0,2.5])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Solve the model and plot\n\nTo understand the model and generate data, let's solve and visualize the solution\nwith the known parameters:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(prob1,Tsit5())\nplot(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "It's the pendulum, so you know what it looks like. It's periodic, but since we\nhave not made a small angle assumption it's not exactly `sin` or `cos`. Because\nthe true dampening parameter `ω` is 1, the solution does not decay over time,\nnor does it increase. 
The length `L` determines the period.\n\n### Create some dummy data to use for estimation\n\nWe now generate some dummy data to use for estimation" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t = collect(range(1,stop=10,length=10))\nrandomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])\ndata = convert(Array,randomized)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's see what our data looks like on top of the real solution" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "scatter!(data')" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This data captures the non-dampening effect and the true period, making it\nperfect to attempting a Bayesian inference.\n\n### Perform Bayesian Estimation\n\nNow let's fit the pendulum to the data. Since we know our model is correct,\nthis should give us back the parameters that we used to generate the data!\nDefine priors on our parameters. 
In this case, let's assume we don't have much\ninformation, but have a prior belief that ω is between 0.1 and 3.0, while the\nlength of the pendulum L is probably around 3.0:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "priors = [Uniform(0.1,3.0), Normal(3.0,1.0)]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Finally let's run the estimation routine from DiffEqBayes.jl using the Turing.jl backend" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "bayesian_result = turing_inference(prob1,Tsit5(),t,data,priors;num_samples=10_000,\n syms = [:omega,:L])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that while our guesses had the wrong means, the learned parameters converged\nto the correct means, meaning that it learned good posterior distributions for the\nparameters. To look at these posterior distributions on the parameters, we can\nexamine the chains:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(bayesian_result)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "As a diagnostic, we will also check the parameter chains. The chain is the MCMC\nsampling process. 
The chain should explore parameter space and converge reasonably\nwell, and we should be taking a lot of samples after it converges (it is these\nsamples that form the posterior distribution!)" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(bayesian_result, colordim = :parameter)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that after awhile these chains converge to a \"fuzzy line\", meaning it\nfound the area with the most likelihood and then starts to sample around there,\nwhich builds a posterior distribution around the true mean." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/models/07-outer_solar_system.ipynb b/notebook/models/07-outer_solar_system.ipynb deleted file mode 100644 index 17a09c02..00000000 --- a/notebook/models/07-outer_solar_system.ipynb +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# The Outer Solar System\n### Yingbo Ma, Chris Rackauckas\n\n## Data\n\nThe chosen units are: masses relative to the sun, so that the sun has mass $1$. We have taken $m_0 = 1.00000597682$ to take account of the inner planets. 
Distances are in astronomical units , times in earth days, and the gravitational constant is thus $G = 2.95912208286 \\cdot 10^{-4}$.\n\n| planet | mass | initial position | initial velocity |\n| --- | --- | --- | --- |\n| Jupiter | $m_1 = 0.000954786104043$ | | \n| Saturn | $m_2 = 0.000285583733151$ | | \n| Uranus | $m_3 = 0.0000437273164546$ | | \n| Neptune | $m_4 = 0.0000517759138449$ | | \n| Pluto | $ m_5 = 1/(1.3 \\cdot 10^8 )$ | | \n\nThe data is taken from the book \"Geometric Numerical Integration\" by E. Hairer, C. Lubich and G. Wanner." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools\ngr()\n\nG = 2.95912208286e-4\nM = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8]\nplanets = [\"Sun\", \"Jupiter\", \"Saturn\", \"Uranus\", \"Neptune\", \"Pluto\"]\n\npos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357]\npos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594]\npos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382]\npos = ArrayPartition(pos_x,pos_y,pos_z)\n\nvel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725]\nvel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702]\nvel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504]\nvel = ArrayPartition(vel_x,vel_y,vel_z)\n\ntspan = (0.,200_000)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The N-body problem's Hamiltonian is\n\n$$H(p,q) = \\frac{1}{2}\\sum_{i=0}^{N}\\frac{p_{i}^{T}p_{i}}{m_{i}} - G\\sum_{i=1}^{N}\\sum_{j=0}^{i-1}\\frac{m_{i}m_{j}}{\\left\\lVert q_{i}-q_{j} \\right\\rVert}$$\n\nHere, we want to solve for the motion of the five outer planets relative to the sun, namely, Jupiter, Saturn, Uranus, Neptune and Pluto." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "const ∑ = sum\nconst N = 6\npotential(p, t, x, y, z, M) = -G*∑(i->∑(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Hamiltonian System\n\n`NBodyProblem` constructs a second order ODE problem under the hood. We know that a Hamiltonian system has the form of\n\n$$\\dot{p} = -H_{q}(p,q)\\quad \\dot{q}=H_{p}(p,q)$$\n\nFor an N-body system, we can symplify this as:\n\n$$\\dot{p} = -\\nabla{V}(q)\\quad \\dot{q}=M^{-1}p.$$\n\nThus $\\dot{q}$ is defined by the masses. We only need to define $\\dot{p}$, and this is done internally by taking the gradient of $V$. Therefore, we only need to pass the potential function and the rest is taken care of." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "nprob = NBodyProblem(potential, M, pos, vel, tspan)\nsol = solve(nprob,Yoshida6(), dt=100);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "orbitplot(sol,body_names=planets)" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/ode_extras/01-ModelingToolkit.ipynb b/notebook/ode_extras/01-ModelingToolkit.ipynb deleted file mode 100644 index 8bed8b04..00000000 --- a/notebook/ode_extras/01-ModelingToolkit.ipynb +++ /dev/null @@ -1,405 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# ModelingToolkit.jl, An IR and Compiler for Scientific Models\n### Chris Rackauckas\n\nA lot of people are building modeling languages for 
their specific domains. However, while the syntax my vary greatly between these domain-specific languages (DSLs), the internals of modeling frameworks are surprisingly similar: building differential equations, calculating Jacobians, etc.\n\n#### ModelingToolkit.jl is metamodeling systemitized\n\nAfter building our third modeling interface, we realized that this problem can be better approached by having a reusable internal structure which DSLs can target. This internal is ModelingToolkit.jl: an Intermediate Representation (IR) with a well-defined interface for defining system transformations and compiling to Julia functions for use in numerical libraries. Now a DSL can easily be written by simply defining the translation to ModelingToolkit.jl's primatives and querying for the mathematical quantities one needs.\n\n### Basic usage: defining differential equation systems, with performance!\n\nLet's explore the IR itself. ModelingToolkit.jl is friendly to use, and can used as a symbolic DSL in its own right. Let's define and solve the Lorenz differential equation system using ModelingToolkit to generate the functions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using ModelingToolkit\n\n### Define a differential equation system\n\n@parameters t σ ρ β\n@variables x(t) y(t) z(t)\n@derivatives D'~t\n\neqs = [D(x) ~ σ*(y-x),\n D(y) ~ x*(ρ-z)-y,\n D(z) ~ x*y - β*z]\nde = ODESystem(eqs)\node_f = ODEFunction(de, [x,y,z], [σ,ρ,β])\n\n### Use in DifferentialEquations.jl\n\nusing OrdinaryDiffEq\nu₀ = ones(3)\ntspan = (0.0,100.0)\np = [10.0,28.0,10/3]\nprob = ODEProblem(ode_f,u₀,tspan,p)\nsol = solve(prob,Tsit5())\n\nusing Plots\nplot(sol,vars=(1,2,3))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### ModelingToolkit is a compiler for mathematical systems\n\nAt its core, ModelingToolkit is a compiler. 
It's IR is its type system, and its output are Julia functions (it's a compiler for Julia code to Julia code, written in Julia).\n\nDifferentialEquations.jl wants a function `f(du,u,p,t)` for defining an ODE system, which is what ModelingToolkit.jl is building." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "generate_function(de, [x,y,z], [σ,ρ,β])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "A special syntax in DifferentialEquations.jl for small static ODE systems uses `f(u,p,t)`, which can be generated as well:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "generate_function(de, [x,y,z], [σ,ρ,β]; version=ModelingToolkit.SArrayFunction)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "ModelingToolkit.jl can be used to calculate the Jacobian of the differential equation system:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "jac = calculate_jacobian(de)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "It will automatically generate functions for using this Jacobian within the stiff ODE solvers for faster solving:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "jac_expr = generate_jacobian(de)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "It can even do fancy linear algebra. Stiff ODE solvers need to perform an LU-factorization which is their most expensive part. But ModelingToolkit.jl can skip this operation and instead generate the analytical solution to a matrix factorization, and build a Julia function for directly computing the factorization, which is then optimized in LLVM compiler passes." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "ModelingToolkit.generate_factorized_W(de)[1]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Solving Nonlinear systems\n\nModelingToolkit.jl is not just for differential equations. It can be used for any mathematical target that is representable by its IR. For example, let's solve a rootfinding problem `F(x)=0`. What we do is define a nonlinear system and generate a function for use in NLsolve.jl" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@variables x y z\n@parameters σ ρ β\n\n# Define a nonlinear system\neqs = [0 ~ σ*(y-x),\n 0 ~ x*(ρ-z)-y,\n 0 ~ x*y - β*z]\nns = NonlinearSystem(eqs, [x,y,z])\nnlsys_func = generate_function(ns, [x,y,z], [σ,ρ,β])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can then tell ModelingToolkit.jl to compile this function for use in NLsolve.jl, and then numerically solve the rootfinding problem:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "nl_f = @eval eval(nlsys_func)\n# Make a closure over the parameters for for NLsolve.jl\nf2 = (du,u) -> nl_f(du,u,(10.0,26.0,2.33))\n\nusing NLsolve\nnlsolve(f2,ones(3))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Library of transformations on mathematical systems\n\nThe reason for using ModelingToolkit is not just for defining performant Julia functions for solving systems, but also for performing mathematical transformations which may be required in order to numerically solve the system. For example, let's solve a third order ODE. The way this is done is by transforming the third order ODE into a first order ODE, and then solving the resulting ODE. This transformation is given by the `ode_order_lowering` function." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@derivatives D3'''~t\n@derivatives D2''~t\n@variables u(t), x(t)\neqs = [D3(u) ~ 2(D2(u)) + D(u) + D(x) + 1\n D2(x) ~ D(x) + 2]\nde = ODESystem(eqs)\nde1 = ode_order_lowering(de)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "de1.eqs" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This has generated a system of 5 first order ODE systems which can now be used in the ODE solvers.\n\n### Linear Algebra... for free?\n\nLet's take a look at how to extend ModelingToolkit.jl in new directions. Let's define a Jacobian just by using the derivative primatives by hand:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@parameters t σ ρ β\n@variables x(t) y(t) z(t)\n@derivatives D'~t Dx'~x Dy'~y Dz'~z\neqs = [D(x) ~ σ*(y-x),\n D(y) ~ x*(ρ-z)-y,\n D(z) ~ x*y - β*z]\nJ = [Dx(eqs[1].rhs) Dy(eqs[1].rhs) Dz(eqs[1].rhs)\n Dx(eqs[2].rhs) Dy(eqs[2].rhs) Dz(eqs[2].rhs)\n Dx(eqs[3].rhs) Dy(eqs[3].rhs) Dz(eqs[3].rhs)]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that this writes the derivatives in a \"lazy\" manner. If we want to actually compute the derivatives, we can expand out those expressions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "J = expand_derivatives.(J)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here's the magic of ModelingToolkit.jl: **Julia treats ModelingToolkit expressions like a Number, and so generic numerical functions are directly usable on ModelingToolkit expressions!** Let's compute the LU-factorization of this Jacobian we defined using Julia's Base linear algebra library." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using LinearAlgebra\nluJ = lu(J)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "luJ.L" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and the inverse?" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "invJ = inv(J)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Thus ModelingToolkit.jl can utilize existing numerical code on symbolic codes\n\nLet's follow this thread a little deeper.\n\n### Automatically convert numerical codes to symbolic\n\nLet's take someone's code written to numerically solve the Lorenz equation:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function lorenz(du,u,p,t)\n du[1] = p[1]*(u[2]-u[1])\n du[2] = u[1]*(p[2]-u[3]) - u[2]\n du[3] = u[1]*u[2] - p[3]*u[3]\nend" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Since ModelingToolkit can trace generic numerical functions in Julia, let's trace it with Operations. When we do this, it'll spit out a symbolic representation of their numerical code:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "u = [x,y,z]\ndu = similar(u)\np = [σ,ρ,β]\nlorenz(du,u,p,t)\ndu" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can then perform symbolic manipulations on their numerical code, and build a new numerical code that optimizes/fixes their original function!" 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "J = [Dx(du[1]) Dy(du[1]) Dz(du[1])\n Dx(du[2]) Dy(du[2]) Dz(du[2])\n Dx(du[3]) Dy(du[3]) Dz(du[3])]\nJ = expand_derivatives.(J)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Automated Sparsity Detection\n\nIn many cases one has to speed up large modeling frameworks by taking into account sparsity. While ModelingToolkit.jl can be used to compute Jacobians, we can write a standard Julia function in order to get a spase matrix of expressions which automatically detects and utilizes the sparsity of their function." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using SparseArrays\nfunction SparseArrays.SparseMatrixCSC(M::Matrix{T}) where {T<:ModelingToolkit.Expression}\n idxs = findall(!iszero, M)\n I = [i[1] for i in idxs]\n J = [i[2] for i in idxs]\n V = [M[i] for i in idxs]\n return SparseArrays.sparse_IJ_sorted!(I, J, V, size(M)...)\nend\nsJ = SparseMatrixCSC(J)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Dependent Variables, Functions, Chain Rule\n\n\"Variables\" are overloaded. When you are solving a differential equation, the variable `u(t)` is actually a function of time. In order to handle these kinds of variables in a mathematically correct and extensible manner, the ModelingToolkit IR actually treats variables as functions, and constant variables are simply 0-ary functions (`t()`).\n\nWe can utilize this idea to have parameters that are also functions. 
For example, we can have a parameter σ which acts as a function of 1 argument, and then utilize this function within our differential equations:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@parameters σ(..)\neqs = [D(x) ~ σ(t-1)*(y-x),\n D(y) ~ x*(σ(t^2)-z)-y,\n D(z) ~ x*y - β*z]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that when we calculate the derivative with respect to `t`, the chain rule is automatically handled:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "@derivatives Dₜ'~t\nDₜ(x*(σ(t^2)-z)-y)\nexpand_derivatives(Dₜ(x*(σ(t^2)-z)-y))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Hackability: Extend directly from the language\n\nModelingToolkit.jl is written in Julia, and thus it can be directly extended from Julia itself. Let's define a normal Julia function and call it with a variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "_f(x) = 2x + x^2\n_f(x)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Recall that when we do that, it will automatically trace this function and then build a symbolic expression. But what if we wanted our function to be a primative in the symbolic framework? This can be done by registering the function." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f(x) = 2x + x^2\n@register f(x)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now this function is a new primitive:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f(x)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and we can now define derivatives of our function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "function ModelingToolkit.derivative(::typeof(f), args::NTuple{1,Any}, ::Val{1})\n 2 + 2args[1]\nend\nexpand_derivatives(Dx(f(x)))" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/ode_extras/02-feagin.ipynb b/notebook/ode_extras/02-feagin.ipynb deleted file mode 100644 index d6bd08f6..00000000 --- a/notebook/ode_extras/02-feagin.ipynb +++ /dev/null @@ -1,115 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Feagin's Order 10, 12, and 14 Methods\n### Chris Rackauckas\n\nDifferentialEquations.jl includes Feagin's explicit Runge-Kutta methods of orders 10/8, 12/10, and 14/12. These methods have such high order that it's pretty much required that one uses numbers with more precision than Float64. As a prerequisite reference on how to use arbitrary number systems (including higher precision) in the numerical solvers, please see the Solving Equations in With Chosen Number Types notebook.\n\n## Investigation of the Method's Error\n\nWe can use Feagin's order 16 method as follows. Let's use a two-dimensional linear ODE. 
Like in the Solving Equations in With Chosen Number Types notebook, we change the initial condition to BigFloats to tell the solver to use BigFloat types." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nconst linear_bigα = big(1.01)\nf(u,p,t) = (linear_bigα*u)\n\n# Add analytical solution so that errors are checked\nf_analytic(u0,p,t) = u0*exp(linear_bigα*t)\nff = ODEFunction(f,analytic=f_analytic)\nprob = ODEProblem(ff,big(0.5),(0.0,1.0))\nsol = solve(prob,Feagin14(),dt=1//16,adaptive=false);" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(sol.errors)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Compare that to machine $\\epsilon$ for Float64:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "eps(Float64)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The error for Feagin's method when the stepsize is 1/16 is 8 orders of magnitude below machine $\\epsilon$! However, that is dependent on the stepsize. If we instead use adaptive timestepping with the default tolerances, we get" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol =solve(prob,Feagin14());\nprintln(sol.errors); print(\"The length was $(length(sol))\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that when the stepsize is much higher, the error goes up quickly as well. These super high order methods are best when used to gain really accurate approximations (using still modest timesteps). 
Some examples of where such precision is necessary is astrodynamics where the many-body problem is highly chaotic and thus sensitive to small errors.\n\n## Convergence Test\n\nThe Order 14 method is awesome, but we need to make sure it's really that awesome. The following convergence test is used in the package tests in order to make sure the implementation is correct. Note that all methods have such tests in place." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqDevTools\ndts = 1.0 ./ 2.0 .^(10:-1:4)\nsim = test_convergence(dts,prob,Feagin14())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "For a view of what's going on, let's plot the simulation results." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots\ngr()\nplot(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This is a clear trend indicating that the convergence is truly Order 14, which\nis the estimated slope." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/ode_extras/03-ode_minmax.ipynb b/notebook/ode_extras/03-ode_minmax.ipynb deleted file mode 100644 index 93697719..00000000 --- a/notebook/ode_extras/03-ode_minmax.ipynb +++ /dev/null @@ -1,197 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Finding Maxima and Minima of DiffEq Solutions\n### Chris Rackauckas\n\n### Setup\n\nIn this tutorial we will show how to use Optim.jl to find the maxima and minima of solutions. 
Let's take a look at the double pendulum:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "#Constants and setup\nusing OrdinaryDiffEq\ninitial = [0.01, 0.01, 0.01, 0.01]\ntspan = (0.,100.)\n\n#Define the problem\nfunction double_pendulum_hamiltonian(udot,u,p,t)\n α = u[1]\n lα = u[2]\n β = u[3]\n lβ = u[4]\n udot .=\n [2(lα-(1+cos(β))lβ)/(3-cos(2β)),\n -2sin(α) - sin(α+β),\n 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)),\n -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)]\nend\n\n#Pass to solvers\npoincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(poincare, Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "In time, the solution looks like:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots; gr()\nplot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "while it has the well-known phase-space plot:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol, vars=(3,4), leg=false)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Local Optimization\n\nLet's fine out what some of the local maxima and minima are. Optim.jl can be used to minimize functions, and the solution type has a continuous interpolation which can be used. Let's look for the local optima for the 4th variable around `t=20`. 
Thus our optimization function is:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (t) -> sol(t,idxs=4)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "`first(t)` is the same as `t[1]` which transforms the array of size 1 into a number. `idxs=4` is the same as `sol(first(t))[4]` but does the calculation without a temporary array and thus is faster. To find a local minima, we can simply call Optim on this function. Let's find a local minimum:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Optim\nopt = optimize(f,18.0,22.0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "From this printout we see that the minimum is at `t=18.63` and the value is `-2.79e-2`. We can get these in code-form via:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(opt.minimizer)\nprintln(opt.minimum)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To get the maximum, we just minimize the negative of the function:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (t) -> -sol(first(t),idxs=4)\nopt2 = optimize(f,0.0,22.0)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's add the maxima and minima to the plots:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol, vars=(0,4), plotdensity=10000)\nscatter!([opt.minimizer],[opt.minimum],label=\"Local Min\")\nscatter!([opt2.minimizer],[-opt2.minimum],label=\"Local Max\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Brent's method will locally minimize over the full interval. If we instead want a local maxima nearest to a point, we can use `BFGS()`. 
In this case, we need to optimize a vector `[t]`, and thus dereference it to a number using `first(t)`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (t) -> -sol(first(t),idxs=4)\nopt = optimize(f,[20.0],BFGS())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### Global Optimization\n\nIf we instead want to find global maxima and minima, we need to look somewhere else. For this there are many choices. A pure Julia option is BlackBoxOptim.jl, but I will use NLopt.jl. Following the NLopt.jl tutorial but replacing their function with out own:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "import NLopt, ForwardDiff\n\ncount = 0 # keep track of # function evaluations\n\nfunction g(t::Vector, grad::Vector)\n if length(grad) > 0\n #use ForwardDiff for the gradients\n grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t)\n end\n sol(first(t),idxs=4)\nend\nopt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1)\nNLopt.lower_bounds!(opt, [0.0])\nNLopt.upper_bounds!(opt, [40.0])\nNLopt.xtol_rel!(opt,1e-8)\nNLopt.min_objective!(opt, g)\n(minf,minx,ret) = NLopt.optimize(opt,[20.0])\nprintln(minf,\" \",minx,\" \",ret)\nNLopt.max_objective!(opt, g)\n(maxf,maxx,ret) = NLopt.optimize(opt,[20.0])\nprintln(maxf,\" \",maxx,\" \",ret)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol, vars=(0,4), plotdensity=10000)\nscatter!([minx],[minf],label=\"Global Min\")\nscatter!([maxx],[maxf],label=\"Global Max\")" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git 
a/notebook/ode_extras/04-monte_carlo_parameter_estim.ipynb b/notebook/ode_extras/04-monte_carlo_parameter_estim.ipynb deleted file mode 100644 index ab6a67b6..00000000 --- a/notebook/ode_extras/04-monte_carlo_parameter_estim.ipynb +++ /dev/null @@ -1,204 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Monte Carlo Parameter Estimation From Data\n### Chris Rackauckas\n\nFirst you want to create a problem which solves multiple problems at the same time. This is the Monte Carlo Problem. When the parameter estimation tools say it will take any DEProblem, it really means ANY DEProblem!\n\nSo, let's get a Monte Carlo problem setup that solves with 10 different initial conditions." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, DiffEqParamEstim, Plots, Optim\n\n# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions\n\n# Set up Lotka-Volterra system\nfunction pf_func(du,u,p,t)\n du[1] = p[1] * u[1] - p[2] * u[1]*u[2]\n du[2] = -3 * u[2] + u[1]*u[2]\nend\np = [1.5,1.0]\nprob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now for a MonteCarloProblem we have to take this problem and tell it what to do N times via the prob_func. 
So let's generate N=10 different initial conditions, and tell it to run the same problem but with these 10 different initial conditions each time:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Setting up to solve the problem N times (for the N different initial conditions)\nN = 10;\ninitial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]]\nfunction prob_func(prob,i,repeat)\n ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)\nend\nmonte_prob = MonteCarloProblem(prob,prob_func=prob_func)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can check this does what we want by solving it:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Check above does what we want\nsim = solve(monte_prob,Tsit5(),num_monte=N)\nplot(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "num_monte=N means \"run N times\", and each time it runs the problem returned by the prob_func, which is always the same problem but with the ith initial condition.\n\nNow let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Generate a dataset from these runs\ndata_times = 0.0:0.1:10.0\nsim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)\ndata = Array(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here, data[i,j,k] is the same as sim[i,j,k] which is the same as sim[k][i,j] (where sim[k] is the kth solution). So data[i,j,k] is the jth timepoint of the ith variable in the kth trajectory.\n\nNow let's build a loss function. 
A loss function is some loss(sol) that spits out a scalar for how far from optimal we are. In the documentation I show that we normally do loss = L2Loss(t,data), but we can bootstrap off of this. Instead lets build an array of N loss functions, each one with the correct piece of data." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# Building a loss function\nlosses = [L2Loss(data_times,data[:,:,i]) for i in 1:N]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "So losses[i] is a function which computes the loss of a solution against the data of the ith trajectory. So to build our true loss function, we sum the losses:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "loss(sim) = sum(losses[i](sim[i]) for i in 1:N)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "As a double check, make sure that loss(sim) outputs zero (since we generated the data from sim). Now we generate data with other parameters:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8])\nfunction prob_func(prob,i,repeat)\n ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p)\nend\nmonte_prob = MonteCarloProblem(prob,prob_func=prob_func)\nsim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times)\nloss(sim)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and get a non-zero loss. So we now have our problem, our data, and our loss function... we have what we need.\n\nPut this into build_loss_objective." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,\n saveat=data_times)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that I added the kwargs for solve into this. They get passed to an internal solve command, so then the loss is computed on N trajectories at data_times.\n\nThus we take this objective function over to any optimization package. I like to do quick things in Optim.jl. Here, since the Lotka-Volterra equation requires positive parameters, I use Fminbox to make sure the parameters stay positive. I start the optimization with [1.3,0.9], and Optim spits out that the true parameters are:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "lower = zeros(2)\nupper = fill(2.0,2)\nresult = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "result" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Optim finds one but not the other parameter.\n\nI would run a test on synthetic data for your problem before using it on real data. Maybe play around with different optimization packages, or add regularization. You may also want to decrease the tolerance of the ODE solvers via" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N,\n abstol=1e-8,reltol=1e-8,\n saveat=data_times)\nresult = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS()))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "result" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "if you suspect error is the problem. 
However, if you're having problems it's most likely not the ODE solver tolerance and mostly because parameter inference is a very hard optimization problem." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/test.ipynb b/notebook/test.ipynb deleted file mode 100644 index b1266b71..00000000 --- a/notebook/test.ipynb +++ /dev/null @@ -1,35 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "This is a test of the builder system.\n# Test\n### Chris Rackauckas" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DiffEqTutorials\nDiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.0" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.0", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/type_handling/01-number_types.ipynb b/notebook/type_handling/01-number_types.ipynb deleted file mode 100644 index dcc96fbf..00000000 --- a/notebook/type_handling/01-number_types.ipynb +++ /dev/null @@ -1,170 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Solving Equations in With Julia-Defined Types\n### Chris Rackauckas\n\nOne of the nice things about DifferentialEquations.jl is that it is designed with Julia's type system in mind. What this means is, if you have properly defined a Number type, you can use this number type in DifferentialEquations.jl's algorithms! 
[Note that this is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as ODE.jl, Sundials.jl, and ODEInterface.jl are not compatible with some number systems.]\n\nDifferentialEquations.jl determines the numbers to use in its solvers via the types that are designated by `tspan` and the initial condition of the problem. It will keep the time values in the same type as tspan, and the solution values in the same type as the initial condition. [Note that adaptive timestepping requires that the time type is compaible with `sqrt` and `^` functions. Thus dt cannot be Integer or numbers like that if adaptive timestepping is chosen].\n\nLet's solve the linear ODE first define an easy way to get ODEProblems for the linear ODE:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nf = (u,p,t) -> (p*u)\nprob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01);" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "First let's solve it using Float64s. To do so, we just need to set u0 to a Float64 (which is done by the default) and dt should be a float as well." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = prob_ode_linear\nsol =solve(prob,Tsit5())\nprintln(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that both the times and the solutions were saved as Float64. Let's change the time to use rational values. Rationals are not compatible with adaptive time stepping since they do not have an L2 norm (this can be worked around by defining `internalnorm`, but rationals already explode in size!). 
To account for this, let's turn off adaptivity as well:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,1/2,(0//1,1//1),101//100);\nsol = solve(prob,RK4(),dt=1//2^(6),adaptive=false)\nprintln(sol)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Now let's do something fun. Let's change the solution to use `Rational{BigInt}` and print out the value at the end of the simulation. To do so, simply change the definition of the initial condition." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100);\nsol =solve(prob,RK4(),dt=1//2^(6),adaptive=false)\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "That's one huge fraction!\n\n## Other Compatible Number Types\n\n#### BigFloats" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01))\nsol =solve(prob_ode_biglinear,Tsit5())\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### DoubleFloats.jl\n\nThere's are Float128-like types. Higher precision, but fixed and faster than arbitrary precision." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DoubleFloats\nprob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01))\nsol =solve(prob_ode_doublelinear,Tsit5())\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### ArbFloats\n\nThese high precision numbers which are much faster than Bigs for less than 500-800 bits of accuracy." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using ArbNumerics\nprob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01))\nsol =solve(prob_ode_arbfloatlinear,Tsit5())\nprintln(sol[end])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Incompatible Number Systems\n\n#### DecFP.jl\n\nNext let's try DecFP. DecFP is a fixed-precision decimals library which is made to give both performance but known decimals of accuracy. Having already installed DecFP with `]add DecFP`, I can run the following:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DecFP\nprob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01))\nsol =solve(prob_ode_decfplinear,Tsit5())\nprintln(sol[end]); println(typeof(sol[end]))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Decimals.jl\n\nInstall with `]add Decimals`." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Decimals\nprob_ode_decimallinear = ODEProblem(f,[decimal(\"1.0\")]./[decimal(\"2.0\")],(0//1,1//1),decimal(1.01))\nsol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails\nprintln(sol[end]); println(typeof(sol[end]))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "At the time of writing this, Decimals are not compatible. This is not on DifferentialEquations.jl's end, it's on partly on Decimal's end since it is not a subtype of Number. Thus it's not recommended you use Decimals with DifferentialEquations.jl\n\n## Conclusion\n\nAs you can see, DifferentialEquations.jl can use arbitrary Julia-defined number systems in its arithmetic. If you need 128-bit floats, i.e. a bit more precision but not arbitrary, DoubleFloats.jl is a very good choice! 
For arbitrary precision, ArbNumerics are the most feature-complete and give great performance compared to BigFloats, and thus I recommend their use when high-precision (less than 512-800 bits) is required. DecFP is a great library for high-performance decimal numbers and works well as well. Other number systems could use some modernization." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/type_handling/02-uncertainties.ipynb b/notebook/type_handling/02-uncertainties.ipynb deleted file mode 100644 index dcf800ef..00000000 --- a/notebook/type_handling/02-uncertainties.ipynb +++ /dev/null @@ -1,174 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Numbers with Uncertainties\n### Mosè Giordano, Chris Rackauckas\n\nThe result of a measurement should be given as a number with an attached uncertainties, besides the physical unit, and all operations performed involving the result of the measurement should propagate the uncertainty, taking care of correlation between quantities.\n\nThere is a Julia package for dealing with numbers with uncertainties: [`Measurements.jl`](https://github.com/JuliaPhysics/Measurements.jl). 
Thanks to Julia's features, `DifferentialEquations.jl` easily works together with `Measurements.jl` out-of-the-box.\n\nThis notebook will cover some of the examples from the tutorial about classical Physics.\n\n## Caveat about `Measurement` type\n\nBefore going on with the tutorial, we must point up a subtlety of `Measurements.jl` that you should be aware of:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Measurements\n\n5.23 ± 0.14 === 5.23 ± 0.14" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "(5.23± 0.14) - (5.23 ± 0.14)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "(5.23 ± 0.14) / (5.23 ± 0.14)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The two numbers above, even though have the same nominal value and the same uncertainties, are actually two different measurements that only by chance share the same figures and their difference and their ratio have a non-zero uncertainty. 
It is common in physics to get very similar, or even equal, results for a repeated measurement, but the two measurements are not the same thing.\n\nInstead, if you have *one measurement* and want to perform some operations involving it, you have to assign it to a variable:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "x = 5.23 ± 0.14\nx === x" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "x - x" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "x / x" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Radioactive Decay of Carbon-14\n\nThe rate of decay of carbon-14 is governed by a first order linear ordinary differential equation\n\n$$\\frac{\\mathrm{d}u(t)}{\\mathrm{d}t} = -\\frac{u(t)}{\\tau}$$\n\nwhere $\\tau$ is the mean lifetime of carbon-14, which is related to the half-life $t_{1/2} = (5730 \\pm 40)$ years by the relation $\\tau = t_{1/2}/\\ln(2)$." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Measurements, Plots\n\n# Half-life and mean lifetime of radiocarbon, in years\nt_12 = 5730 ± 40\nτ = t_12 / log(2)\n\n#Setup\nu₀ = 1 ± 0\ntspan = (0.0, 10000.0)\n\n#Define the problem\nradioactivedecay(u,p,t) = - u / τ\n\n#Pass to solver\nprob = ODEProblem(radioactivedecay, u₀, tspan)\nsol = solve(prob, Tsit5(), reltol = 1e-8)\n\n# Analytic solution\nu = exp.(- sol.t / τ)\n\nplot(sol.t, sol.u, label = \"Numerical\", xlabel = \"Years\", ylabel = \"Fraction of Carbon-14\")\nplot!(sol.t, u, label = \"Analytic\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The two curves are perfectly superimposed, indicating that the numerical solution matches the analytic one. 
We can check that also the uncertainties are correctly propagated in the numerical solution:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "println(\"Quantity of carbon-14 after \", sol.t[11], \" years:\")\nprintln(\"Numerical: \", sol[11])\nprintln(\"Analytic: \", u[11])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Both the value of the numerical solution and its uncertainty match the analytic solution within the requested tolerance. We can also note that close to 5730 years after the beginning of the decay (half-life of the radioisotope), the fraction of carbon-14 that survived is about 0.5.\n\n## Simple pendulum\n\n### Small angles approximation\n\nThe next problem we are going to study is the simple pendulum in the approximation of small angles. We address this simplified case because there exists an easy analytic solution to compare.\n\nThe differential equation we want to solve is\n\n$$\\ddot{\\theta} + \\frac{g}{L} \\theta = 0$$\n\nwhere $g = (9.79 \\pm 0.02)~\\mathrm{m}/\\mathrm{s}^2$ is the gravitational acceleration measured where the experiment is carried out, and $L = (1.00 \\pm 0.01)~\\mathrm{m}$ is the length of the pendulum.\n\nWhen you set up the problem for `DifferentialEquations.jl` remember to define the measurements as variables, as seen above." 
- ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, Measurements, Plots\n\ng = 9.79 ± 0.02; # Gravitational constants\nL = 1.00 ± 0.01; # Length of the pendulum\n\n#Initial Conditions\nu₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle\ntspan = (0.0, 6.3)\n\n#Define the problem\nfunction simplependulum(du,u,p,t)\n θ = u[1]\n dθ = u[2]\n du[1] = dθ\n du[2] = -(g/L)*θ\nend\n\n#Pass to solvers\nprob = ODEProblem(simplependulum, u₀, tspan)\nsol = solve(prob, Tsit5(), reltol = 1e-6)\n\n# Analytic solution\nu = u₀[2] .* cos.(sqrt(g / L) .* sol.t)\n\nplot(sol.t, getindex.(sol.u, 2), label = \"Numerical\")\nplot!(sol.t, u, label = \"Analytic\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Also in this case there is a perfect superimposition between the two curves, including their uncertainties.\n\nWe can also have a look at the difference between the two solutions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "plot(sol.t, getindex.(sol.u, 2) .- u, label = \"\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Arbitrary amplitude\n\nNow that we know how to solve differential equations involving numbers with uncertainties we can solve the simple pendulum problem without any approximation. 
This time the differential equation to solve is the following:\n\n$$\\ddot{\\theta} + \\frac{g}{L} \\sin(\\theta) = 0$$" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "g = 9.79 ± 0.02; # Gravitational constants\nL = 1.00 ± 0.01; # Length of the pendulum\n\n#Initial Conditions\nu₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle\ntspan = (0.0, 6.3)\n\n#Define the problem\nfunction simplependulum(du,u,p,t)\n θ = u[1]\n dθ = u[2]\n du[1] = dθ\n du[2] = -(g/L) * sin(θ)\nend\n\n#Pass to solvers\nprob = ODEProblem(simplependulum, u₀, tspan)\nsol = solve(prob, Tsit5(), reltol = 1e-6)\n\nplot(sol.t, getindex.(sol.u, 2), label = \"Numerical\")" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We note that in this case the period of the oscillations is not constant." - ], - "metadata": {} - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/notebook/type_handling/03-unitful.ipynb b/notebook/type_handling/03-unitful.ipynb deleted file mode 100644 index f75da5f9..00000000 --- a/notebook/type_handling/03-unitful.ipynb +++ /dev/null @@ -1,163 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Unit Checked Arithmetic via Unitful.jl\n### Chris Rackauckas\n\nUnits and dimensional analysis are standard tools across the sciences for checking the correctness of your equation. 
However, most ODE solvers only allow for the equation to be in dimensionless form, leaving it up to the user to both convert the equation to a dimensionless form, punch in the equations, and hopefully not make an error along the way.\n\nDifferentialEquations.jl allows for one to use Unitful.jl to have unit-checked arithmetic natively in the solvers. Given the dispatch implementation of the Unitful, this has little overhead.\n\n## Using Unitful\n\nTo use Unitful, you need to have the package installed. Then you can add units to your variables. For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Unitful\nt = 1.0u\"s\"" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that `t` is a variable with units in seconds. If we make another value with seconds, they can add" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t2 = 1.02u\"s\"\nt+t2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and they can multiply:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t*t2" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "You can even do rational roots:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "sqrt(t)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Many operations work. 
These operations will check to make sure units are correct, and will throw an error for incorrect operations:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "t + sqrt(t)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Using Unitful with DifferentialEquations.jl\n\nJust like with other number systems, you can choose the units for your numbers by simply specifying the units of the initial condition and the timestep. For example, to solve the linear ODE where the variable has units of Newton's and `t` is in Seconds, we would use:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations\nf = (y,p,t) -> 0.5*y\nu0 = 1.5u\"N\"\nprob = ODEProblem(f,u0,(0.0u\"s\",1.0u\"s\"))\nsol = solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Notice that we recieved a unit mismatch error. This is correctly so! Remember that for an ODE:\n\n$$\\frac{dy}{dt} = f(t,y)$$\n\nwe must have that `f` is a rate, i.e. `f` is a change in `y` per unit time. So we need to fix the units of `f` in our example to be `N/s`. Notice that we then do not receive an error if we do the following:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "f = (y,p,t) -> 0.5*y/3.0u\"s\"\nprob = ODEProblem(f,u0,(0.0u\"s\",1.0u\"s\"))\nsol = solve(prob,Tsit5())" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This gives a a normal solution object. 
Notice that the values are all with the correct units:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "print(sol[:])" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "We can plot the solution by removing the units:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using Plots\ngr()\nplot(ustrip(sol.t),ustrip(sol[:]),lw=3)" - ], - "metadata": {}, - "execution_count": null - } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" - } - }, - "nbformat": 4 -} diff --git a/pdf/advanced/01-beeler_reuter.pdf b/pdf/advanced/01-beeler_reuter.pdf deleted file mode 100644 index b9bc905b..00000000 Binary files a/pdf/advanced/01-beeler_reuter.pdf and /dev/null differ diff --git a/pdf/advanced/02-advanced_ODE_solving.pdf b/pdf/advanced/02-advanced_ODE_solving.pdf deleted file mode 100644 index 502abfca..00000000 Binary files a/pdf/advanced/02-advanced_ODE_solving.pdf and /dev/null differ diff --git a/pdf/exercises/01-workshop_exercises.pdf b/pdf/exercises/01-workshop_exercises.pdf deleted file mode 100644 index 471f4857..00000000 Binary files a/pdf/exercises/01-workshop_exercises.pdf and /dev/null differ diff --git a/pdf/exercises/02-workshop_solutions.pdf b/pdf/exercises/02-workshop_solutions.pdf deleted file mode 100644 index 9b733988..00000000 Binary files a/pdf/exercises/02-workshop_solutions.pdf and /dev/null differ diff --git a/pdf/introduction/01-ode_introduction.pdf b/pdf/introduction/01-ode_introduction.pdf deleted file mode 100644 index af52294a..00000000 Binary files a/pdf/introduction/01-ode_introduction.pdf and /dev/null differ diff --git a/pdf/introduction/02-choosing_algs.pdf b/pdf/introduction/02-choosing_algs.pdf deleted 
file mode 100644 index 655b1420..00000000 Binary files a/pdf/introduction/02-choosing_algs.pdf and /dev/null differ diff --git a/pdf/introduction/03-optimizing_diffeq_code.pdf b/pdf/introduction/03-optimizing_diffeq_code.pdf deleted file mode 100644 index 6ba95cec..00000000 Binary files a/pdf/introduction/03-optimizing_diffeq_code.pdf and /dev/null differ diff --git a/pdf/introduction/04-callbacks_and_events.pdf b/pdf/introduction/04-callbacks_and_events.pdf deleted file mode 100644 index 9dcbaf31..00000000 Binary files a/pdf/introduction/04-callbacks_and_events.pdf and /dev/null differ diff --git a/pdf/introduction/05-formatting_plots.pdf b/pdf/introduction/05-formatting_plots.pdf deleted file mode 100644 index 693f76ef..00000000 Binary files a/pdf/introduction/05-formatting_plots.pdf and /dev/null differ diff --git a/pdf/models/01-classical_physics.pdf b/pdf/models/01-classical_physics.pdf deleted file mode 100644 index dbca06fc..00000000 Binary files a/pdf/models/01-classical_physics.pdf and /dev/null differ diff --git a/pdf/models/02-conditional_dosing.pdf b/pdf/models/02-conditional_dosing.pdf deleted file mode 100644 index f3fc40f1..00000000 Binary files a/pdf/models/02-conditional_dosing.pdf and /dev/null differ diff --git a/pdf/models/03-diffeqbio_I_introduction.pdf b/pdf/models/03-diffeqbio_I_introduction.pdf deleted file mode 100644 index b510e10e..00000000 Binary files a/pdf/models/03-diffeqbio_I_introduction.pdf and /dev/null differ diff --git a/pdf/models/04-diffeqbio_II_networkproperties.pdf b/pdf/models/04-diffeqbio_II_networkproperties.pdf deleted file mode 100644 index cc71d1bc..00000000 Binary files a/pdf/models/04-diffeqbio_II_networkproperties.pdf and /dev/null differ diff --git a/pdf/models/05-kepler_problem.pdf b/pdf/models/05-kepler_problem.pdf deleted file mode 100644 index c4a7e55f..00000000 Binary files a/pdf/models/05-kepler_problem.pdf and /dev/null differ diff --git a/pdf/models/06-pendulum_bayesian_inference.pdf 
b/pdf/models/06-pendulum_bayesian_inference.pdf deleted file mode 100644 index 1f65d69d..00000000 Binary files a/pdf/models/06-pendulum_bayesian_inference.pdf and /dev/null differ diff --git a/pdf/models/07-outer_solar_system.pdf b/pdf/models/07-outer_solar_system.pdf deleted file mode 100644 index 340a4973..00000000 Binary files a/pdf/models/07-outer_solar_system.pdf and /dev/null differ diff --git a/pdf/ode_extras/01-ModelingToolkit.pdf b/pdf/ode_extras/01-ModelingToolkit.pdf deleted file mode 100644 index 10c2e355..00000000 Binary files a/pdf/ode_extras/01-ModelingToolkit.pdf and /dev/null differ diff --git a/pdf/ode_extras/02-feagin.pdf b/pdf/ode_extras/02-feagin.pdf deleted file mode 100644 index f5c31ff7..00000000 Binary files a/pdf/ode_extras/02-feagin.pdf and /dev/null differ diff --git a/pdf/ode_extras/03-ode_minmax.pdf b/pdf/ode_extras/03-ode_minmax.pdf deleted file mode 100644 index 3afb4537..00000000 Binary files a/pdf/ode_extras/03-ode_minmax.pdf and /dev/null differ diff --git a/pdf/ode_extras/04-monte_carlo_parameter_estim.pdf b/pdf/ode_extras/04-monte_carlo_parameter_estim.pdf deleted file mode 100644 index 2ca0da8b..00000000 Binary files a/pdf/ode_extras/04-monte_carlo_parameter_estim.pdf and /dev/null differ diff --git a/pdf/test.pdf b/pdf/test.pdf deleted file mode 100644 index 7a76a68f..00000000 Binary files a/pdf/test.pdf and /dev/null differ diff --git a/pdf/type_handling/01-number_types.pdf b/pdf/type_handling/01-number_types.pdf deleted file mode 100644 index 92fecd47..00000000 Binary files a/pdf/type_handling/01-number_types.pdf and /dev/null differ diff --git a/pdf/type_handling/02-uncertainties.pdf b/pdf/type_handling/02-uncertainties.pdf deleted file mode 100644 index a25c1fa1..00000000 Binary files a/pdf/type_handling/02-uncertainties.pdf and /dev/null differ diff --git a/pdf/type_handling/03-unitful.pdf b/pdf/type_handling/03-unitful.pdf deleted file mode 100644 index 61fad739..00000000 Binary files a/pdf/type_handling/03-unitful.pdf 
and /dev/null differ diff --git a/script/advanced/01-beeler_reuter.jl b/script/advanced/01-beeler_reuter.jl deleted file mode 100644 index e855ea24..00000000 --- a/script/advanced/01-beeler_reuter.jl +++ /dev/null @@ -1,455 +0,0 @@ - -const v0 = -84.624 -const v1 = 10.0 -const C_K1 = 1.0f0 -const C_x1 = 1.0f0 -const C_Na = 1.0f0 -const C_s = 1.0f0 -const D_Ca = 0.0f0 -const D_Na = 0.0f0 -const g_s = 0.09f0 -const g_Na = 4.0f0 -const g_NaC = 0.005f0 -const ENa = 50.0f0 + D_Na -const γ = 0.5f0 -const C_m = 1.0f0 - - -mutable struct BeelerReuterCpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - C::Array{Float32, 2} # intracellular calcium concentration - M::Array{Float32, 2} # sodium current activation gate (m) - H::Array{Float32, 2} # sodium current inactivation gate (h) - J::Array{Float32, 2} # sodium current slow inactivaiton gate (j) - D::Array{Float32, 2} # calcium current activaiton gate (d) - F::Array{Float32, 2} # calcium current inactivation gate (f) - XI::Array{Float32, 2} # inward-rectifying potassium current (iK1) - - Δu::Array{Float64, 2} # place-holder for the Laplacian - - function BeelerReuterCpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.C = fill(0.0001f0, (ny,nx)) - self.M = fill(0.01f0, (ny,nx)) - self.H = fill(0.988f0, (ny,nx)) - self.J = fill(0.975f0, (ny,nx)) - self.D = fill(0.003f0, (ny,nx)) - self.F = fill(0.994f0, (ny,nx)) - self.XI = fill(0.0001f0, (ny,nx)) - - self.Δu = zeros(ny,nx) - - return self - end -end - - -# 5-point stencil -function laplacian(Δu, u) - n1, n2 = size(u) - - # internal nodes - for j = 2:n2-1 - for i = 2:n1-1 - @inbounds Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j] - end - end - - # left/right edges - for i = 2:n1-1 - @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1] - @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2] - 
end - - # top/bottom edges - for j = 2:n2-1 - @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j] - @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j] - end - - # corners - @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1] - @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1] - @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2] - @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2] -end - - -@inline function rush_larsen(g, α, β, Δt) - inf = α/(α+β) - τ = 1f0 / (α+β) - return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0) -end - - -function update_M_cpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * exp(-0.056f0*(v+72.0f0))) - return rush_larsen(g, α, β, Δt) -end - -function update_H_cpu(g, v, Δt) - α = 0.126f0 * exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_J_cpu(g, v, Δt) - α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_D_cpu(g, v, Δt) - α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_F_cpu(g, v, Δt) - α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_XI_cpu(g, v, Δt) - α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - - -function update_C_cpu(g, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g) 
- kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - g) - τ = 1f0 / 0.07f0 - return g + (g - inf) * expm1(-Δt/τ) -end - - -function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt) - let Δt = Float32(Δt) - n1, n2 = size(u) - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - XI[i,j] = update_XI_cpu(XI[i,j], v, Δt) - M[i,j] = update_M_cpu(M[i,j], v, Δt) - H[i,j] = update_H_cpu(H[i,j], v, Δt) - J[i,j] = update_J_cpu(J[i,j], v, Δt) - D[i,j] = update_D_cpu(D[i,j], v, Δt) - F[i,j] = update_F_cpu(F[i,j], v, Δt) - - C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - end - end -end - - -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = exp(0.04f0*(v+85f0)) - eb = exp(0.08f0*(v+53f0)) - ec = exp(0.04f0*(v+53f0)) - ed = exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = exp(0.04f0*(v+77f0)) - eb = exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end - -function update_du_cpu(du, u, XI, M, H, J, D, F, C) - n1, n2 = size(u) - - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - end - end -end - - -function (f::BeelerReuterCpu)(du, u, p, t) - Δt = t - f.t - - if Δt != 0 || t 
== 0 - update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt) - f.t = t - end - - laplacian(f.Δu, u) - - # calculate the reaction portion - update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δu -end - - -const N = 192; -u0 = fill(v0, (N, N)); -u0[90:102,90:102] .= v1; # a small square in the middle of the domain - - -using Plots -heatmap(u0) - - -using DifferentialEquations, Sundials - -deriv_cpu = BeelerReuterCpu(u0, 1.0); -prob = ODEProblem(deriv_cpu, u0, (0.0, 50.0)); - - -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); - - -heatmap(sol.u[end]) - - -using CUDAnative, CuArrays - -mutable struct BeelerReuterGpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - d_C::CuArray{Float32, 2} # intracellular calcium concentration - d_M::CuArray{Float32, 2} # sodium current activation gate (m) - d_H::CuArray{Float32, 2} # sodium current inactivation gate (h) - d_J::CuArray{Float32, 2} # sodium current slow inactivaiton gate (j) - d_D::CuArray{Float32, 2} # calcium current activaiton gate (d) - d_F::CuArray{Float32, 2} # calcium current inactivation gate (f) - d_XI::CuArray{Float32, 2} # inward-rectifying potassium current (iK1) - - d_u::CuArray{Float64, 2} # place-holder for u in the device memory - d_du::CuArray{Float64, 2} # place-holder for d_u in the device memory - - Δv::Array{Float64, 2} # place-holder for voltage gradient - - function BeelerReuterGpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - @assert (nx % 16 == 0) && (ny % 16 == 0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.d_C = CuArray(fill(0.0001f0, (ny,nx))) - self.d_M = CuArray(fill(0.01f0, (ny,nx))) - self.d_H = CuArray(fill(0.988f0, (ny,nx))) - self.d_J = CuArray(fill(0.975f0, (ny,nx))) - self.d_D = CuArray(fill(0.003f0, (ny,nx))) - self.d_F = CuArray(fill(0.994f0, (ny,nx))) - self.d_XI = 
CuArray(fill(0.0001f0, (ny,nx))) - - self.d_u = CuArray(u0) - self.d_du = CuArray(zeros(ny,nx)) - - self.Δv = zeros(ny,nx) - - return self - end -end - - -function rush_larsen_gpu(g, α, β, Δt) - inf = α/(α+β) - τ = 1.0/(α+β) - return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0) -end - -function update_M_gpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0))) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_H_gpu(g, v, Δt) - α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_J_gpu(g, v, Δt) - α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_D_gpu(g, v, Δt) - α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_F_gpu(g, v, Δt) - α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_XI_gpu(g, v, Δt) - α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_C_gpu(c, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) - kCa = C_s * g_s * d * f - iCa = kCa * (v - 
ECa) - inf = 1.0f-7 * (0.07f0 - c) - τ = 1f0 / 0.07f0 - return c + (c - inf) * CUDAnative.expm1(-Δt/τ) -end - - -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = CUDAnative.exp(0.04f0*(v+85f0)) - eb = CUDAnative.exp(0.08f0*(v+53f0)) - ec = CUDAnative.exp(0.04f0*(v+53f0)) - ed = CUDAnative.exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = CUDAnative.exp(0.04f0*(v+77f0)) - eb = CUDAnative.exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end - - -function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - let Δt = Float32(Δt) - XI[i,j] = update_XI_gpu(XI[i,j], v, Δt) - M[i,j] = update_M_gpu(M[i,j], v, Δt) - H[i,j] = update_H_gpu(H[i,j], v, Δt) - J[i,j] = update_J_gpu(J[i,j], v, Δt) - D[i,j] = update_D_gpu(D[i,j], v, Δt) - F[i,j] = update_F_gpu(F[i,j], v, Δt) - - C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - nothing -end - -function update_du_gpu(du, u, XI, M, H, J, D, F, C) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + 
iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - nothing -end - - -function (f::BeelerReuterGpu)(du, u, p, t) - L = 16 # block size - Δt = t - f.t - copyto!(f.d_u, u) - ny, nx = size(u) - - if Δt != 0 || t == 0 - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu( - f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt) - f.t = t - end - - laplacian(f.Δv, u) - - # calculate the reaction portion - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu( - f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C) - - copyto!(du, f.d_du) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δv -end - - -using DifferentialEquations, Sundials - -deriv_gpu = BeelerReuterGpu(u0, 1.0); -prob = ODEProblem(deriv_gpu, u0, (0.0, 50.0)); -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); - - -heatmap(sol.u[end]) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/advanced/02-advanced_ODE_solving.jl b/script/advanced/02-advanced_ODE_solving.jl deleted file mode 100644 index 2de4cc7d..00000000 --- a/script/advanced/02-advanced_ODE_solving.jl +++ /dev/null @@ -1,200 +0,0 @@ - -ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ()) - - -using LinearAlgebra -LinearAlgebra.BLAS.set_num_threads(4) - - -using DifferentialEquations -function rober(du,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - du[1] = -k₁*y₁+k₃*y₂*y₃ - du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ - du[3] = k₂*y₂^2 - nothing -end -prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -sol = solve(prob,Rosenbrock23()) - -using Plots -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) - - -using BenchmarkTools -@btime solve(prob) - - -function rober_jac(J,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - J[1,1] = k₁ * -1 - J[2,1] = k₁ - J[3,1] = 0 - J[1,2] = y₃ * k₃ - J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1 - J[3,2] = y₂ * 2 * k₂ - J[1,3] = k₃ * y₂ - J[2,3] = k₃ * y₂ * 
-1 - J[3,3] = 0 - nothing -end -f = ODEFunction(rober, jac=rober_jac) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) - -@btime solve(prob_jac) - - -using ModelingToolkit -de = modelingtoolkitize(prob) -ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place - - -:((##MTIIPVar#376, u, p, t)->begin - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =# - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =# - let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3]) - ##MTIIPVar#376[1] = α₁ * -1 - ##MTIIPVar#376[2] = α₁ - ##MTIIPVar#376[3] = 0 - ##MTIIPVar#376[4] = x₃ * α₃ - ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1 - ##MTIIPVar#376[6] = x₂ * 2 * α₂ - ##MTIIPVar#376[7] = α₃ * x₂ - ##MTIIPVar#376[8] = α₃ * x₂ * -1 - ##MTIIPVar#376[9] = 0 - end - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =# - nothing - end) - - -jac = eval(ModelingToolkit.generate_jacobian(de...)[2]) -f = ODEFunction(rober, jac=jac) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) - - -I = [1,2,1,2,3,1,2] -J = [1,1,2,2,2,3,3] -using SparseArrays -jac_prototype = sparse(I,J,1.0) - - -f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) - - -const N = 32 -const xyd_brusselator = range(0,stop=1,length=N) -brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5. -limit(a, N) = a == N+1 ? 1 : a == 0 ? 
N : a -function brusselator_2d_loop(du, u, p, t) - A, B, alpha, dx = p - alpha = alpha/dx^2 - @inbounds for I in CartesianIndices((N, N)) - i, j = Tuple(I) - x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] - ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N) - du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) + - B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) - du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) + - A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] - end -end -p = (3.4, 1., 10., step(xyd_brusselator)) - - -using SparsityDetection, SparseArrays -input = rand(32,32,2) -output = similar(input) -sparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0) -jac_sparsity = Float64.(sparse(sparsity_pattern)) - - -using Plots -spy(jac_sparsity,markersize=1,colorbar=false,color=:deep) - - -f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity) - - -function init_brusselator_2d(xyd) - N = length(xyd) - u = zeros(N, N, 2) - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - u[I,1] = 22*(y*(1-y))^(3/2) - u[I,2] = 27*(x*(1-x))^(3/2) - end - u -end -u0 = init_brusselator_2d(xyd_brusselator) -prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop, - u0,(0.,11.5),p) - -prob_ode_brusselator_2d_sparse = ODEProblem(f, - u0,(0.,11.5),p) - - -@btime solve(prob_ode_brusselator_2d,save_everystep=false) -@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) - - -using SparseDiffTools -colorvec = matrix_colors(jac_sparsity) -@show maximum(colorvec) - - -f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity, - colorvec=colorvec) -prob_ode_brusselator_2d_sparse = ODEProblem(f, - init_brusselator_2d(xyd_brusselator), - (0.,11.5),p) -@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) - - -@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) -@btime 
solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) - - -using DiffEqOperators -Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0) - - -f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv) -prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p) -@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) - - -using AlgebraicMultigrid -pc = aspreconditioner(ruge_stuben(jac_sparsity)) -@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false) - - -using Sundials -# Sparse Version -@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false) -# GMRES Version: Doesn't require any extra stuff! -@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) - - -using DifferentialEquations -function rober(du,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - du[1] = -k₁*y₁+k₃*y₂*y₃ - du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ - du[3] = y₁ + y₂ + y₃ - 1 - nothing -end -M = [1. 0 0 - 0 1. 0 - 0 0 0] -f = ODEFunction(rober,mass_matrix=M) -prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -sol = solve(prob_mm,Rodas5()) - -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) - diff --git a/script/exercises/01-workshop_exercises.jl b/script/exercises/01-workshop_exercises.jl deleted file mode 100644 index b55a08dd..00000000 --- a/script/exercises/01-workshop_exercises.jl +++ /dev/null @@ -1,61 +0,0 @@ - -A+Y -> X+P -X+Y -> 2P -A+X -> 2X + 2Z -2X -> A + P (note: this has rate kX^2!) 
-B + Z -> Y - - -t = 0.0:1.0:30.0 -data = [1.0 2.05224 2.11422 2.1857 2.26827 2.3641 2.47618 2.60869 2.7677 2.96232 3.20711 3.52709 3.97005 4.64319 5.86202 9.29322 536.068 82388.9 57868.4 1.00399 1.00169 1.00117 1.00094 1.00082 1.00075 1.0007 1.00068 1.00066 1.00065 1.00065 1.00065 - 2.0 1.9494 1.89645 1.84227 1.78727 1.73178 1.67601 1.62008 1.56402 1.50772 1.45094 1.39322 1.33366 1.2705 1.19958 1.10651 0.57194 0.180316 0.431409 251.774 591.754 857.464 1062.78 1219.05 1335.56 1419.88 1478.22 1515.63 1536.25 1543.45 1539.98 - 3.0 2.82065 2.68703 2.58974 2.52405 2.48644 2.47449 2.48686 2.52337 2.58526 2.67563 2.80053 2.9713 3.21051 3.5712 4.23706 12.0266 14868.8 24987.8 23453.4 19202.2 15721.6 12872.0 10538.8 8628.66 7064.73 5784.29 4735.96 3877.66 3174.94 2599.6] - - -t = 0.0:12.0:90.0 -data = [100.0 0.246196 0.000597933 0.24547 0.000596251 0.245275 0.000595453 0.245511 - 0.0 53.7939 16.8784 58.7789 18.3777 59.1879 18.5003 59.2611] - - -u0 = Float32[2.; 0.] -datasize = 30 -tspan = (0.0f0,1.5f0) - -function trueODEfunc(du,u,p,t) - true_A = [-0.1 2.0; -2.0 -0.1] - du .= ((u.^3)'true_A)' -end -t = range(tspan[1],tspan[2],length=datasize) -prob = ODEProblem(trueODEfunc,u0,tspan) -ode_data = Array(solve(prob,Tsit5(),saveat=t)) - - -function lotka_volterra(du,u,p,t) - x, y = u - α, β, δ, γ = p - du[1] = dx = α*x - β*x*y - du[2] = dy = -δ*y + γ*x*y -end -u0 = [1.0,1.0] -tspan = (0.0,10.0) -p = [1.5,1.0,3.0,1.0] -prob = ODEProblem(lotka_volterra,u0,tspan,p) -sol = Array(solve(prob,Tsit5())(0.0:1.0:10.0)) - - -function lotka_volterra(du,u,p,t) - x, y = u - α, β, δ, γ = p - du[1] = dx = α*x - β*x*y - du[2] = dy = -δ*y + γ*x*y -end -function lv_noise(du,u,p,t) - du[1] = p[5]*u[1] - du[2] = p[6]*u[2] -end -u0 = [1.0,1.0] -tspan = (0.0,10.0) -p = [1.5,1.0,3.0,1.0,0.1,0.1] -prob = SDEProblem(lotka_volterra,lv_noise,u0,tspan,p) -sol = [Array(solve(prob,SOSRI())(0.0:1.0:10.0)) for i in 1:20] # 20 solution samples - diff --git a/script/exercises/02-workshop_solutions.jl 
b/script/exercises/02-workshop_solutions.jl deleted file mode 100644 index 69ea0394..00000000 --- a/script/exercises/02-workshop_solutions.jl +++ /dev/null @@ -1,98 +0,0 @@ - -using DifferentialEquations, Plots -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -p = [77.27,8.375e-6,0.161] -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,360.0),p) -sol = solve(prob) -plot(sol) - - -plot(sol,vars=(1,2,3)) - - -using BenchmarkTools -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,50.0),p) -@btime sol = solve(prob,Tsit5()) - - -@btime sol = solve(prob,Rodas5()) - - -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -function g(du,u,p,t) - du[1] = 0.1u[1] - du[2] = 0.1u[2] - du[3] = 0.1u[3] -end -p = [77.27,8.375e-6,0.161] -prob = SDEProblem(orego,g,[1.0,2.0,3.0],(0.0,30.0),p) -sol = solve(prob,SOSRI()) -plot(sol) - - -sol = solve(prob,ImplicitRKMil()); plot(sol) - - -sol = solve(prob,ImplicitRKMil()); plot(sol) - - -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -p = [60.0,1e-5,0.2] -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,30.0),p) -sol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14) - - -function onecompartment(du,u,p,t) - Ka,Ke = p - du[1] = -Ka*u[1] - du[2] = Ka*u[1] - Ke*u[2] -end -p = (Ka=2.268,Ke=0.07398) -prob = ODEProblem(onecompartment,[100.0,0.0],(0.0,90.0),p) - -tstops = [24,48,72] -condition(u,t,integrator) = t ∈ tstops -affect!(integrator) = (integrator.u[1] += 100) -cb = DiscreteCallback(condition,affect!) 
-sol = solve(prob,Tsit5(),callback=cb,tstops=tstops) -plot(sol) - - -function onecompartment_delay(du,u,h,p,t) - Ka,Ke,τ = p - delayed_depot = h(p,t-τ)[1] - du[1] = -Ka*u[1] - du[2] = Ka*delayed_depot - Ke*u[2] -end -p = (Ka=2.268,Ke=0.07398,τ=6.0) -h(p,t) = [0.0,0.0] -prob = DDEProblem(onecompartment_delay,[100.0,0.0],h,(0.0,90.0),p) - -tstops = [24,48,72] -condition(u,t,integrator) = t ∈ tstops -affect!(integrator) = (integrator.u[1] += 100) -cb = DiscreteCallback(condition,affect!) -sol = solve(prob,MethodOfSteps(Rosenbrock23()),callback=cb,tstops=tstops) -plot(sol) - - -p = (Ka = 0.5, Ke = 0.1, τ = 4.0) - diff --git a/script/introduction/01-ode_introduction.jl b/script/introduction/01-ode_introduction.jl deleted file mode 100644 index 68022532..00000000 --- a/script/introduction/01-ode_introduction.jl +++ /dev/null @@ -1,180 +0,0 @@ - -f(u,p,t) = 0.98u - - -using DifferentialEquations -f(u,p,t) = 0.98u -u0 = 1.0 -tspan = (0.0,1.0) -prob = ODEProblem(f,u0,tspan) - - -sol = solve(prob) - - -using Plots; gr() -plot(sol) - - -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", - xaxis="Time (t)",yaxis="u(t) (in μm)",label="My Thick Line!") # legend=false - - -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") - - -sol.t - - -sol.u - - -[t+u for (u,t) in tuples(sol)] - - -sol - - -sol(0.45) - - -sol = solve(prob,abstol=1e-8,reltol=1e-8) - - -plot(sol) -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") - - -sol = solve(prob,saveat=0.1) - - -sol = solve(prob,saveat=[0.2,0.7,0.9]) - - -sol = solve(prob,dense=false) - - -sol = solve(prob,save_everystep=false) - - -sol = solve(prob,save_everystep=false,save_start = false) - - -sol = solve(prob,alg_hints=[:stiff]) - - -sol = solve(prob,Tsit5(),reltol=1e-6) - - -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end - - -u0 = [1.0,0.0,0.0] - - -p = (10,28,8/3) # we could also make this an 
array, or any other type! - - -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) - - -sol = solve(prob) - - -sol.t[10],sol[10] - - -sol[2,10] - - -A = Array(sol) - - -plot(sol) - - -plot(sol,vars=(1,2,3)) - - -plot(sol,vars=(1,2,3),denseplot=false) - - -plot(sol,vars=(0,2)) - - -function lotka_volterra!(du,u,p,t) - du[1] = p[1]*u[1] - p[2]*u[1]*u[2] - du[2] = -p[3]*u[2] + p[4]*u[1]*u[2] -end - - -using ParameterizedFunctions -lv! = @ode_def LotkaVolterra begin - dx = a*x - b*x*y - dy = -c*y + d*x*y -end a b c d - - -u0 = [1.0,1.0] -p = (1.5,1.0,3.0,1.0) -tspan = (0.0,10.0) -prob = ODEProblem(lv!,u0,tspan,p) -sol = solve(prob) -plot(sol) - - -lv!.Jex - - -A = [1. 0 0 -5 - 4 -2 4 -3 - -4 0 0 1 - 5 -2 2 3] -u0 = rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) - - -sol[3] - - -big_u0 = big.(u0) - - -prob = ODEProblem(f,big_u0,tspan) -sol = solve(prob) - - -sol[1,3] - - -prob = ODEProblem(f,big_u0,big.(tspan)) -sol = solve(prob) - - -using StaticArrays -A = @SMatrix [ 1.0 0.0 0.0 -5.0 - 4.0 -2.0 4.0 -3.0 - -4.0 0.0 0.0 1.0 - 5.0 -2.0 2.0 3.0] -u0 = @SMatrix rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) - - -sol[3] - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/02-choosing_algs.jl b/script/introduction/02-choosing_algs.jl deleted file mode 100644 index b6701eff..00000000 --- a/script/introduction/02-choosing_algs.jl +++ /dev/null @@ -1,49 +0,0 @@ - -using DifferentialEquations, ParameterizedFunctions -van! 
= @ode_def VanDerPol begin - dy = μ*((1-x^2)*y - x) - dx = 1*y -end μ - -prob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6) - - -sol = solve(prob,Tsit5()) - - -sol = solve(prob,alg_hints = [:stiff]) - - -sol = solve(prob) - - -using Plots; gr() -sol = solve(prob,alg_hints = [:stiff],reltol=1e-6) -plot(sol,denseplot=false) - - -plot(sol,ylims = (-10.0,10.0)) - - -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -u0 = [1.0,0.0,0.0] -p = (10,28,8/3) -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) - - -using BenchmarkTools -@btime solve(prob); - - -@btime solve(prob,alg_hints = [:stiff]); - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/03-optimizing_diffeq_code.jl b/script/introduction/03-optimizing_diffeq_code.jl deleted file mode 100644 index f9e55ece..00000000 --- a/script/introduction/03-optimizing_diffeq_code.jl +++ /dev/null @@ -1,325 +0,0 @@ - -function lorenz(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - [dx,dy,dz] -end - - -using DifferentialEquations, BenchmarkTools -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz,u0,tspan) -@benchmark solve(prob,Tsit5()) - - -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -function lorenz!(du,u,p,t) - du[1] = 10.0*(u[2]-u[1]) - du[2] = u[1]*(28.0-u[3]) - u[2] - du[3] = u[1]*u[2] - (8/3)*u[3] -end - - -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5()) - - -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -tspan = (0.0,500.0) # 5x longer than before -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -using StaticArrays -A = @SVector [2.0,3.0,5.0] - - -function lorenz_static(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - @SVector 
[dx,dy,dz] -end - - -u0 = @SVector [1.0,0.0,0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz_static,u0,tspan) -@benchmark solve(prob,Tsit5()) - - -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000) -test(A,B,C) = A + B + C -@benchmark test(A,B,C) - - -test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C) -@benchmark test2(A,B,C) - - -function test3(A,B,C) - D = similar(A) - @inbounds for i in eachindex(A) - D[i] = A[i] + B[i] + C[i] - end - D -end -@benchmark test3(A,B,C) - - -test4(A,B,C) = A .+ B .+ C -@benchmark test4(A,B,C) - - -sin.(A) .+ sin.(B) - - -test5(A,B,C) = @. A + B + C #only one array allocated -@benchmark test5(A,B,C) - - -D = zeros(1000,1000); - - -test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated -@benchmark test6!(D,A,B,C) - - -test7!(D,A,B,C) = @. D = A + B + C #only one array allocated -@benchmark test7!(D,A,B,C) - - -test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C) -@benchmark test8!(D,A,B,C) - - -@benchmark A*B - - -using LinearAlgebra -@benchmark mul!(D,A,B) # same as D = A * B - - -# Generate the constants -p = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2 -N = 100 -Ax = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1])) -Ay = copy(Ax) -Ax[2,1] = 2.0 -Ax[end-1,end] = 2.0 -Ay[1,2] = 2.0 -Ay[end,end-1] = 2.0 - -function basic_version!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = r[:,:,1] - v = r[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u - dr[:,:,2] = Dv .+ a.*u.*u .- β*v -end - -a,α,ubar,β,D1,D2 = p -uss = (ubar+β)/α -vss = (a/β)*uss^2 -r0 = zeros(100,100,2) -r0[:,:,1] .= uss.+0.1.*rand.() -r0[:,:,2] .= vss - -prob = ODEProblem(basic_version!,r0,(0.0,0.1),p) - - -@benchmark solve(prob,Tsit5()) - - -A = rand(4) -@show A -B = @view A[1:3] -B[2] = 2 -@show A - - -function gm2!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view 
dr[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - @. du = Du + a.*u.*u./v + ubar - α*u - @. dv = Dv + a.*u.*u - β*v -end -prob = ODEProblem(gm2!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -Ayu = zeros(N,N) -uAx = zeros(N,N) -Du = zeros(N,N) -Ayv = zeros(N,N) -vAx = zeros(N,N) -Dv = zeros(N,N) -function gm3!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm3!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2 -function gm4!(dr,r,p,t) - a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. 
dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm4!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -p = (1.0,1.0,1.0,10.0,0.001,100.0,N) -function fast_gm!(du,u,p,t) - a,α,ubar,β,D1,D2,N = p - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for j in 2:N-1 - i = 1 - du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = 1 - du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for i in 2:N-1 - j = 1 - du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = 1 - du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds begin - i = 1; j = 1 - du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = 1; j = N - du[1,N,1] = D1*(2u[i+1,j,1] + 
2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = 1 - du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = N - du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end -end -prob = ODEProblem(fast_gm!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) - - -prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p) -@benchmark solve(prob,Tsit5()) - - -using Sundials -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) - - -prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p) -# Will go out of memory if we don't turn off `save_everystep`! -@benchmark solve(prob,Tsit5(),save_everystep=false) - - -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) - - -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) - - -prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p) -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/04-callbacks_and_events.jl b/script/introduction/04-callbacks_and_events.jl deleted file mode 100644 index d479d7a1..00000000 --- a/script/introduction/04-callbacks_and_events.jl +++ /dev/null @@ -1,164 +0,0 @@ - -using DifferentialEquations, ParameterizedFunctions -ball! = @ode_def BallBounce begin - dy = v - dv = -g -end g - - -function condition(u,t,integrator) - u[1] -end - - -function affect!(integrator) - integrator.u[2] = -integrator.p[2] * integrator.u[2] -end - - -bounce_cb = ContinuousCallback(condition,affect!) 
- - -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb) - - -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) - - -function condition_kick(u,t,integrator) - t == 2 -end - - -function affect_kick!(integrator) - integrator.u[2] += 50 -end - - -kick_cb = DiscreteCallback(condition_kick,affect_kick!) -u0 = [50.0,0.0] -tspan = (0.0,10.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb) - - -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) - - -cb = CallbackSet(bounce_cb,kick_cb) - - -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=cb) -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) - - -u0 = [1.,0.] -harmonic! = @ode_def HarmonicOscillator begin - dv = -x - dx = v -end -tspan = (0.0,10.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -plot(sol) - - -function terminate_affect!(integrator) - terminate!(integrator) -end - - -function terminate_condition(u,t,integrator) - u[2] -end -terminate_cb = ContinuousCallback(terminate_condition,terminate_affect!) - - -sol = solve(prob,callback=terminate_cb) -plot(sol) - - -sol.t[end] - - -terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing) - - -sol = solve(prob,callback=terminate_upcrossing_cb) -plot(sol) - - -tspan = (0.0,10000.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -gr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points! 
-plot(sol,vars=(1,2)) - - -plot(sol,vars=(0,1),denseplot=false) - - -plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2 - - -function g(resid,u,p,t) - resid[1] = u[2]^2 + u[1]^2 - 1 - resid[2] = 0 -end - - -cb = ManifoldProjection(g) -sol = solve(prob,callback=cb) -plot(sol,vars=(1,2)) - - -plot(sol,vars=(0,1),denseplot=false) - - -u1,u2 = sol[500] -u2^2 + u1^2 - - -prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0)) - - -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) - - -using LinearAlgebra -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values) - - -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving - - -saved_values.t - - -saved_values.saveval - - -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0) -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving - - -saved_values.t - - -saved_values.saveval - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/introduction/05-formatting_plots.jl b/script/introduction/05-formatting_plots.jl deleted file mode 100644 index c76d1483..00000000 --- a/script/introduction/05-formatting_plots.jl +++ /dev/null @@ -1,57 +0,0 @@ - -using DifferentialEquations, Plots, ParameterizedFunctions -gr() -lorenz = @ode_def Lorenz begin - dx = σ*(y-x) - dy = ρ*x-y-x*z - dz = x*y-β*z -end σ β ρ - -p = [10.0,8/3,28] -u0 = [1., 5., 10.] -tspan = (0., 100.) 
-prob = ODEProblem(lorenz, u0, tspan, p) -sol = solve(prob) - - -plot(sol) - - -plot(sol,vars=(1, 2, 3)) - - -plot(sol,vars=[:x]) - - -plot(sol,vars=(1,2,3)) -plot(sol,vars=[1]) - - -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", -xaxis="Time (t)",yaxis="u(t) (in mm)",label=["X","Y","Z"]) - - -scatter(sol,vars=[:x]) - - -plot(sol,vars=(1,2,3),denseplot=false) - - -plot(sol,vars=(1,2,3),plotdensity=100) - - -plot(sol,vars=(1,2,3),plotdensity=10000) - - -plot(sol,vars=(1,2,3)) -scatter!(sol,vars=(1,2,3),plotdensity=100) - - -p = plot(sol,vars=(1,2,3)) -scatter!(p,sol,vars=(1,2,3),plotdensity=100) -title!("I added a title") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/01-classical_physics.jl b/script/models/01-classical_physics.jl deleted file mode 100644 index f99a604b..00000000 --- a/script/models/01-classical_physics.jl +++ /dev/null @@ -1,234 +0,0 @@ - -using OrdinaryDiffEq, Plots -gr() - -#Half-life of Carbon-14 is 5,730 years. 
-C₁ = 5.730 - -#Setup -u₀ = 1.0 -tspan = (0.0, 1.0) - -#Define the problem -radioactivedecay(u,p,t) = -C₁*u - -#Pass to solver -prob = ODEProblem(radioactivedecay,u₀,tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Carbon-14 half-life", xaxis = "Time in thousands of years", yaxis = "Percentage left", label = "Numerical Solution") -plot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label="Analytical Solution") - - -# Simple Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants -const g = 9.81 -L = 1.0 - -#Initial Conditions -u₀ = [0,π/2] -tspan = (0.0,6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum,u₀, tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Simple Pendulum Problem", xaxis = "Time", yaxis = "Height", label = ["Theta","dTheta"]) - - -p = plot(sol,vars = (1,2), xlims = (-9,9), title = "Phase Space Plot", xaxis = "Velocity", yaxis = "Position", leg=false) -function phase_plot(prob, u0, p, tspan=2pi) - _prob = ODEProblem(prob.f,u0,(0.0,tspan)) - sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy - plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing) -end -for i in -4pi:pi/2:4π - for j in -4pi:pi/2:4π - phase_plot(prob, [j,i], p) - end -end -plot(p,xlims = (-9,9)) - - -#Double Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants and setup -const m₁, m₂, L₁, L₂ = 1, 2, 1, 2 -initial = [0, π/3, 0, 3pi/5] -tspan = (0.,50.) 
- -#Convenience function for transforming from polar to Cartesian coordinates -function polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4)) - u = sol.t[1]:dt:sol.t[end] - - p1 = l1*map(x->x[vars[1]], sol.(u)) - p2 = l2*map(y->y[vars[2]], sol.(u)) - - x1 = l1*sin.(p1) - y1 = l1*-cos.(p1) - (u, (x1 + l2*sin.(p2), - y1 - l2*cos.(p2))) -end - -#Define the Problem -function double_pendulum(xdot,x,p,t) - xdot[1]=x[2] - xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2))) - xdot[3]=x[4] - xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2)) -end - -#Pass to Solvers -double_pendulum_problem = ODEProblem(double_pendulum, initial, tspan) -sol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05); - - -#Obtain coordinates in Cartesian Geometry -ts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01) -plot(ps...) - - -#Constants and setup -using OrdinaryDiffEq -initial2 = [0.01, 0.005, 0.01, 0.01] -tspan2 = (0.,200.) 
- -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -# Construct a ContiunousCallback -condition(u,t,integrator) = u[1] -affect!(integrator) = nothing -cb = ContinuousCallback(condition,affect!,nothing, - save_positions = (true,false)) - -# Construct Problem -poincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2) -sol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - -function poincare_map(prob, u₀, p; callback=cb) - _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan) - sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - scatter!(p, sol, vars=(3,4), markersize = 2) -end - - -p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03)) -for i in -0.01:0.00125:0.01 - poincare_map(poincare, i, p) -end -plot(p,ylims=(-0.01,0.03)) - - -using OrdinaryDiffEq, Plots - -#Setup -initial = [0.,0.1,0.5,0] -tspan = (0,100.) - -#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will -#the total energy of the system. -V(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3) -E(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2); - -#Define the function -function Hénon_Heiles(du,u,p,t) - x = u[1] - y = u[2] - dx = u[3] - dy = u[4] - du[1] = dx - du[2] = dy - du[3] = -x - 2x*y - du[4] = y^2 - y -x^2 -end - -#Pass to solvers -prob = ODEProblem(Hénon_Heiles, initial, tspan) -sol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16); - - -# Plot the orbit -plot(sol, vars=(1,2), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) - - -#Optional Sanity check - what do you think this returns and why? 
-@show sol.retcode - -#Plot - -plot(sol, vars=(1,3), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol, vars=(2,4), leg = false) - - -#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector -#pass it to the plotter a bit more conveniently -energy = map(x->E(x...), sol.u) - -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. -@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") - - -function HH_acceleration!(dv,v,u,p,t) - x,y = u - dx,dy = dv - dv[1] = -x - 2x*y - dv[2] = y^2 - y -x^2 -end -initial_positions = [0.0,0.1] -initial_velocities = [0.5,0.0] -prob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan) -sol2 = solve(prob, KahanLi8(), dt=1/10); - - -# Plot the orbit -plot(sol2, vars=(3,4), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) - - -plot(sol2, vars=(3,1), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol2, vars=(4,2), leg = false) - - -energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u) -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. 
-@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol2.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") - - -sol3 = solve(prob, DPRKN6()); -energy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u) -@show ΔE = energy[1]-energy[end] -gr() -plot(sol3.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/02-conditional_dosing.jl b/script/models/02-conditional_dosing.jl deleted file mode 100644 index a43befed..00000000 --- a/script/models/02-conditional_dosing.jl +++ /dev/null @@ -1,50 +0,0 @@ - -using DifferentialEquations -function f(du,u,p,t) - du[1] = -u[1] -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) - - -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) - - -condition(u,t,integrator) = t==4 && u[1]/V<4 -affect!(integrator) = integrator.u[1] += 10 -cb = DiscreteCallback(condition,affect!) - - -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) - - -println(sol(4.00000)) -println(sol(4.000000000001)) - - -function f(du,u,p,t) - du[1] = -u[1]/6 -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) - - -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) - - -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/03-diffeqbio_I_introduction.jl b/script/models/03-diffeqbio_I_introduction.jl deleted file mode 100644 index 2500809d..00000000 --- a/script/models/03-diffeqbio_I_introduction.jl +++ /dev/null @@ -1,118 +0,0 @@ - -# If not already installed, first hit "]" within a Julia REPL. 
Then type: -# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify - -using DifferentialEquations, DiffEqBiological, Plots, Latexify -pyplot(fmt=:svg); - - -repressilator = @reaction_network begin - hillr(P₃,α,K,n), ∅ --> m₁ - hillr(P₁,α,K,n), ∅ --> m₂ - hillr(P₂,α,K,n), ∅ --> m₃ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - (δ,γ), m₃ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - β, m₃ --> m₃ + P₃ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - μ, P₃ --> ∅ -end α K n δ γ β μ; - - -latexify(repressilator; env=:chemical) - - -mathjax = WEAVE_ARGS[:doctype] == "pdf" ? false : true -x = latexify(repressilator; env=:chemical, starred=true, mathjax=mathjax); -display("text/latex", "$x"); - - -latexify(repressilator, cdot=false) - - -x = latexify(repressilator, cdot=false, starred=true); -display("text/latex", "$x"); - - -speciesmap(repressilator) - - -paramsmap(repressilator) - - -# parameters [α,K,n,δ,γ,β,μ] -p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60) - -# initial condition [m₁,m₂,m₃,P₁,P₂,P₃] -u₀ = [0.,0.,0.,20.,0.,0.] - -# time interval to solve on -tspan = (0., 10000.) - -# create the ODEProblem we want to solve -oprob = ODEProblem(repressilator, u₀, tspan, p) - - -sol = solve(oprob, saveat=10.) -plot(sol, fmt=:svg) - - -# first we redefine the initial condition to be integer valued -u₀ = [0,0,0,20,0,0] - -# next we create a discrete problem to encode that our species are integer valued: -dprob = DiscreteProblem(repressilator, u₀, tspan, p) - -# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver: -jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false)) - -# now let's solve and plot the jump process: -sol = solve(jprob, SSAStepper(), saveat=10.) 
-plot(sol, fmt=:svg) - - -rjs = regularjumps(repressilator) -lprob = JumpProblem(dprob, Direct(), rjs) -lsol = solve(lprob, SimpleTauLeaping(), dt=.1) -plot(lsol, plotdensity=1000, fmt=:svg) - - -bdp = @reaction_network begin - c₁, X --> 2X - c₂, X --> 0 - c₃, 0 --> X -end c₁ c₂ c₃ -p = (1.0,2.0,50.) -u₀ = [5.] -tspan = (0.,4.); - - -latexify(bdp, noise=true, cdot=false) - - -x = latexify(bdp, noise=true, cdot=false, starred=true); -display("text/latex", "$x"); - - -# SDEProblem for CLE -sprob = SDEProblem(bdp, u₀, tspan, p) - -# solve and plot, tstops is used to specify enough points -# that the plot looks well-resolved -sol = solve(sprob, tstops=range(0., step=4e-3, length=1001)) -plot(sol, fmt=:svg) - - -latexify(jacobianexprs(repressilator), cdot=false) - - -x = latexify(jacobianexprs(repressilator), cdot=false, starred=true); -display("text/latex", "$x"); - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) - diff --git a/script/models/04-diffeqbio_II_networkproperties.jl b/script/models/04-diffeqbio_II_networkproperties.jl deleted file mode 100644 index 29fe117b..00000000 --- a/script/models/04-diffeqbio_II_networkproperties.jl +++ /dev/null @@ -1,190 +0,0 @@ - -using DifferentialEquations, DiffEqBiological, Latexify, Plots -fmt = :svg -pyplot(fmt=fmt) -rn = @reaction_network begin - hillr(D₂,α,K,n), ∅ --> m₁ - hillr(D₁,α,K,n), ∅ --> m₂ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - (k₊,k₋), 2P₁ ↔ D₁ - (k₊,k₋), 2P₂ ↔ D₂ - (k₊,k₋), P₁+P₂ ↔ T -end α K n δ γ β μ k₊ k₋; - - -latexify(rn; env=:chemical) - - -x = latexify(rn; env=:chemical, starred=true, mathjax=true); -display("text/latex", "$x"); - - -species(rn) - - -params(rn) - - -substratesymstoich(rn, 11) - - -substratesymstoich.(rn, 1:numreactions(rn)) - - -netstoich.(rn, 1:numreactions(rn)) - - -rxtospecies_depgraph(rn) - - -species(rn)[[3,4,7]] - - -speciestorx_depgraph(rn)[1] - - 
-findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn))) - - -rxtorx_depgraph(rn) - - -rnmin = @min_reaction_network begin - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ -end δ γ β μ; - - -addspecies!(rnmin, :D₁) -addspecies!(rnmin, :D₂) -addspecies!(rnmin, :T) - - -addparam!(rnmin, :α) -addparam!(rnmin, :K) -addparam!(rnmin, :n) -addparam!(rnmin, :k₊) -addparam!(rnmin, :k₋) - - -addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂)) -addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂)) -addreaction!(rnmin, :k₊, :(2P₁ --> D₁)) -addreaction!(rnmin, :k₋, :(D₁ --> 2P₁)) - - -# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich) -addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,)) -addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,)) -addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1)) - - -setdiff(species(rn), species(rnmin)) - - -setdiff(params(rn), params(rnmin)) - - -rxidx = numreactions(rn) -setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx)) - - -setdiff(products(rn, rxidx), products(rnmin, rxidx)) - - -rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx) - - -addodes!(rnmin) - - -odeexprs(rnmin) - - -latexify(rnmin) - - -x = latexify(rnmin, starred=true); -display("text/latex", "$x"); - - -latexify(jacobianexprs(rnmin)) - - -x = latexify(jacobianexprs(rnmin), starred=true); -display("text/latex", "$x"); - - -N = 64 -h = 1 / N - - -rn = @empty_reaction_network - -for i = 1:N - addspecies!(rn, Symbol(:u, i)) -end - - -addparam!(rn, :β) - - -for i = 1:N - (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,)) - (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,)) -end - - -addodes!(rn) - - -u₀ = zeros(N) -u₀[div(N,2)] = 10000 -p = [1/(h*h)] -tspan = (0.,.01) -oprob = ODEProblem(rn, u₀, tspan, p) - - -sol = solve(oprob, Rodas5()) -times = [0., .0001, .001, .01] -plt = plot() -for time in times - plot!(plt, 1:N, sol(time), fmt=fmt, 
xlabel="i", ylabel="uᵢ", label=string("t = ", time), lw=3) -end -plot(plt, ylims=(0.,10000.)) - - -addjumps!(rn, build_regular_jumps=false, minimal_jumps=true) - -# make the initial condition integer valued -u₀ = zeros(Int, N) -u₀[div(N,2)] = 10000 - -# setup and solve the problem -dprob = DiscreteProblem(rn, u₀, tspan, p) -jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false)) -jsol = solve(jprob, SSAStepper(), saveat=times) - - -times = [0., .0001, .001, .01] -plts = [] -for i = 1:4 - b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i])) - plot!(b,sol(times[i])) - push!(plts,b) -end -plot(plts...) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) - diff --git a/script/models/04b-diffeqbio_III_steadystates.jl b/script/models/04b-diffeqbio_III_steadystates.jl deleted file mode 100644 index f01b21ff..00000000 --- a/script/models/04b-diffeqbio_III_steadystates.jl +++ /dev/null @@ -1,90 +0,0 @@ - -using DiffEqBiological, Plots -gr(); default(fmt = :png); - - -bistable_switch = @reaction_network begin - d, (X,Y) → ∅ - hillR(Y,v1,K1,n1), ∅ → X - hillR(X,v2,K2,n2), ∅ → Y -end d v1 K1 n1 v2 K2 n2 -d = 0.01; -v1 = 1.5; K1 = 30; n1 = 3; -v2 = 1.; K2 = 30; n2 = 3; -bistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2]; - - -ss = steady_states(bistable_switch, bistable_switch_p) - - -stability(ss,bistable_switch, bistable_switch_p) - - -rn1 = @reaction_network begin - p, ∅ → X - hill(X,v,K,n), X → ∅ -end p v K n -p1 = [1.,2.5,1.5,1.5] -steady_states(rn1,p1) - - -rn2 = @reaction_network begin - p, ∅ → X - log(X), X → ∅ -end p -p2 = [1.] 
-steady_states(rn2,p2) - - -bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.)) -plot(bif,ylabel="[X]",label="") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) - - -plot(bif,2,ylabel="[Y]") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) - - -bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.)) -plot(bif,linewidth=1.,title="A bifurcation diagram",ylabel="Steady State concentration") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) - - -bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.) -plot(bif) -scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) - - -bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.)) -plot(bif) -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) - - -bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.) -plot(bif) -scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) - - -brusselator = @reaction_network begin - A, ∅ → X - 1, 2X + Y → 3X - B, X → Y - 1, X → ∅ -end A B; -A = 0.5; B = 4.; -brusselator_p = [A, B]; - - -bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5)) -plot(bif,2) -plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) - - -bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0)) -plot(bif) -plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) - diff --git a/script/models/05-kepler_problem.jl b/script/models/05-kepler_problem.jl deleted file mode 100644 index be9f97b1..00000000 --- a/script/models/05-kepler_problem.jl +++ /dev/null @@ -1,99 +0,0 @@ - -using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr() -H(q,p) = 
norm(p)^2/2 - inv(norm(q)) -L(q,p) = q[1]*p[2] - p[1]*q[2] - -pdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q) -qdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p) - -initial_position = [.4, 0] -initial_velocity = [0., 2.] -initial_cond = (initial_position, initial_velocity) -initial_first_integrals = (H(initial_cond...), L(initial_cond...)) -tspan = (0,20.) -prob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan) -sol = solve(prob, KahanLi6(), dt=1//10); - - -plot_orbit(sol) = plot(sol,vars=(3,4), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab="Angular momentum variation") -end -analysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L)) - - -analysis_plot(sol, H, L) - - -sol2 = solve(prob, DPRKN6()) # dt is not necessary, because unlike symplectic - # integrators DPRKN6 is adaptive -@show sol2.u |> length -analysis_plot(sol2, H, L) - - -sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic - # integrators ERKN4 is adaptive -@show sol3.u |> length -analysis_plot(sol3, H, L) - - -sol4 = solve(prob, Tsit5()) -@show sol4.u |> length -analysis_plot(sol4, H, L) - - -using DiffEqCallbacks - -plot_orbit2(sol) = plot(sol,vars=(1,2), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals2(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab="Angular momentum variation") -end - -analysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L)) - -function hamiltonian(du,u,params,t) - q, p = u[1:2], u[3:4] - qdot(@view(du[1:2]), p, q, params, t) - 
pdot(@view(du[3:4]), p, q, params, t) -end - -prob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan) -sol_ = solve(prob2, RK4(), dt=1//5, adaptive=false) -analysis_plot2(sol_, H, L) - - -function first_integrals_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) -end - -cb = ManifoldProjection(first_integrals_manifold) -sol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb) -analysis_plot2(sol5, H, L) - - -function energy_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -energy_cb = ManifoldProjection(energy_manifold) -sol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb) -analysis_plot2(sol6, H, L) - - -function angular_manifold(residual,u) - residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -angular_cb = ManifoldProjection(angular_manifold) -sol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb) -analysis_plot2(sol7, H, L) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/models/06-pendulum_bayesian_inference.jl b/script/models/06-pendulum_bayesian_inference.jl deleted file mode 100644 index 9ede02c3..00000000 --- a/script/models/06-pendulum_bayesian_inference.jl +++ /dev/null @@ -1,40 +0,0 @@ - -using DiffEqBayes, OrdinaryDiffEq, RecursiveArrayTools, Distributions, Plots, StatsPlots - - -function pendulum(du,u,p,t) - ω,L = p - x,y = u - du[1] = y - du[2] = - ω*y -(9.8/L)*sin(x) -end - -u0 = [1.0,0.1] -tspan = (0.0,10.0) -prob1 = ODEProblem(pendulum,u0,tspan,[1.0,2.5]) - - -sol = solve(prob1,Tsit5()) -plot(sol) - - -t = collect(range(1,stop=10,length=10)) -randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)]) -data = convert(Array,randomized) - - -scatter!(data') - - -priors = [Uniform(0.1,3.0), 
Normal(3.0,1.0)] - - -bayesian_result = turing_inference(prob1,Tsit5(),t,data,priors;num_samples=10_000, - syms = [:omega,:L]) - - -plot(bayesian_result) - - -plot(bayesian_result, colordim = :parameter) - diff --git a/script/models/07-outer_solar_system.jl b/script/models/07-outer_solar_system.jl deleted file mode 100644 index b3ee7e84..00000000 --- a/script/models/07-outer_solar_system.jl +++ /dev/null @@ -1,36 +0,0 @@ - -using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools -gr() - -G = 2.95912208286e-4 -M = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8] -planets = ["Sun", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"] - -pos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357] -pos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594] -pos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382] -pos = ArrayPartition(pos_x,pos_y,pos_z) - -vel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725] -vel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702] -vel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504] -vel = ArrayPartition(vel_x,vel_y,vel_z) - -tspan = (0.,200_000) - - -const ∑ = sum -const N = 6 -potential(p, t, x, y, z, M) = -G*∑(i->∑(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N) - - -nprob = NBodyProblem(potential, M, pos, vel, tspan) -sol = solve(nprob,Yoshida6(), dt=100); - - -orbitplot(sol,body_names=planets) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/ode_extras/01-ModelingToolkit.jl b/script/ode_extras/01-ModelingToolkit.jl deleted file mode 100644 index 4d2f7a5b..00000000 --- a/script/ode_extras/01-ModelingToolkit.jl +++ /dev/null @@ -1,160 +0,0 @@ - -using ModelingToolkit - -### Define a differential equation system - -@parameters t σ ρ β -@variables x(t) y(t) z(t) -@derivatives D'~t - -eqs = 
[D(x) ~ σ*(y-x), - D(y) ~ x*(ρ-z)-y, - D(z) ~ x*y - β*z] -de = ODESystem(eqs) -ode_f = ODEFunction(de, [x,y,z], [σ,ρ,β]) - -### Use in DifferentialEquations.jl - -using OrdinaryDiffEq -u₀ = ones(3) -tspan = (0.0,100.0) -p = [10.0,28.0,10/3] -prob = ODEProblem(ode_f,u₀,tspan,p) -sol = solve(prob,Tsit5()) - -using Plots -plot(sol,vars=(1,2,3)) - - -generate_function(de, [x,y,z], [σ,ρ,β]) - - -generate_function(de, [x,y,z], [σ,ρ,β]; version=ModelingToolkit.SArrayFunction) - - -jac = calculate_jacobian(de) - - -jac_expr = generate_jacobian(de) - - -ModelingToolkit.generate_factorized_W(de)[1] - - -@variables x y z -@parameters σ ρ β - -# Define a nonlinear system -eqs = [0 ~ σ*(y-x), - 0 ~ x*(ρ-z)-y, - 0 ~ x*y - β*z] -ns = NonlinearSystem(eqs, [x,y,z]) -nlsys_func = generate_function(ns, [x,y,z], [σ,ρ,β]) - - -nl_f = @eval eval(nlsys_func) -# Make a closure over the parameters for for NLsolve.jl -f2 = (du,u) -> nl_f(du,u,(10.0,26.0,2.33)) - -using NLsolve -nlsolve(f2,ones(3)) - - -@derivatives D3'''~t -@derivatives D2''~t -@variables u(t), x(t) -eqs = [D3(u) ~ 2(D2(u)) + D(u) + D(x) + 1 - D2(x) ~ D(x) + 2] -de = ODESystem(eqs) -de1 = ode_order_lowering(de) - - -de1.eqs - - -@parameters t σ ρ β -@variables x(t) y(t) z(t) -@derivatives D'~t Dx'~x Dy'~y Dz'~z -eqs = [D(x) ~ σ*(y-x), - D(y) ~ x*(ρ-z)-y, - D(z) ~ x*y - β*z] -J = [Dx(eqs[1].rhs) Dy(eqs[1].rhs) Dz(eqs[1].rhs) - Dx(eqs[2].rhs) Dy(eqs[2].rhs) Dz(eqs[2].rhs) - Dx(eqs[3].rhs) Dy(eqs[3].rhs) Dz(eqs[3].rhs)] - - -J = expand_derivatives.(J) - - -using LinearAlgebra -luJ = lu(J) - - -luJ.L - - -invJ = inv(J) - - -function lorenz(du,u,p,t) - du[1] = p[1]*(u[2]-u[1]) - du[2] = u[1]*(p[2]-u[3]) - u[2] - du[3] = u[1]*u[2] - p[3]*u[3] -end - - -u = [x,y,z] -du = similar(u) -p = [σ,ρ,β] -lorenz(du,u,p,t) -du - - -J = [Dx(du[1]) Dy(du[1]) Dz(du[1]) - Dx(du[2]) Dy(du[2]) Dz(du[2]) - Dx(du[3]) Dy(du[3]) Dz(du[3])] -J = expand_derivatives.(J) - - -using SparseArrays -function SparseArrays.SparseMatrixCSC(M::Matrix{T}) where 
{T<:ModelingToolkit.Expression} - idxs = findall(!iszero, M) - I = [i[1] for i in idxs] - J = [i[2] for i in idxs] - V = [M[i] for i in idxs] - return SparseArrays.sparse_IJ_sorted!(I, J, V, size(M)...) -end -sJ = SparseMatrixCSC(J) - - -@parameters σ(..) -eqs = [D(x) ~ σ(t-1)*(y-x), - D(y) ~ x*(σ(t^2)-z)-y, - D(z) ~ x*y - β*z] - - -@derivatives Dₜ'~t -Dₜ(x*(σ(t^2)-z)-y) -expand_derivatives(Dₜ(x*(σ(t^2)-z)-y)) - - -_f(x) = 2x + x^2 -_f(x) - - -f(x) = 2x + x^2 -@register f(x) - - -f(x) - - -function ModelingToolkit.derivative(::typeof(f), args::NTuple{1,Any}, ::Val{1}) - 2 + 2args[1] -end -expand_derivatives(Dx(f(x))) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/ode_extras/02-feagin.jl b/script/ode_extras/02-feagin.jl deleted file mode 100644 index f029f8dc..00000000 --- a/script/ode_extras/02-feagin.jl +++ /dev/null @@ -1,35 +0,0 @@ - -using DifferentialEquations -const linear_bigα = big(1.01) -f(u,p,t) = (linear_bigα*u) - -# Add analytical solution so that errors are checked -f_analytic(u0,p,t) = u0*exp(linear_bigα*t) -ff = ODEFunction(f,analytic=f_analytic) -prob = ODEProblem(ff,big(0.5),(0.0,1.0)) -sol = solve(prob,Feagin14(),dt=1//16,adaptive=false); - - -println(sol.errors) - - -eps(Float64) - - -sol =solve(prob,Feagin14()); -println(sol.errors); print("The length was $(length(sol))") - - -using DiffEqDevTools -dts = 1.0 ./ 2.0 .^(10:-1:4) -sim = test_convergence(dts,prob,Feagin14()) - - -using Plots -gr() -plot(sim) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/ode_extras/03-ode_minmax.jl b/script/ode_extras/03-ode_minmax.jl deleted file mode 100644 index 212157a7..00000000 --- a/script/ode_extras/03-ode_minmax.jl +++ /dev/null @@ -1,88 +0,0 @@ - -#Constants and setup -using OrdinaryDiffEq -initial = [0.01, 0.01, 0.01, 0.01] -tspan = (0.,100.) 
- -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -#Pass to solvers -poincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan) - - -sol = solve(poincare, Tsit5()) - - -using Plots; gr() -plot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000) - - -plot(sol, vars=(3,4), leg=false) - - -f = (t) -> sol(t,idxs=4) - - -using Optim -opt = optimize(f,18.0,22.0) - - -println(opt.minimizer) -println(opt.minimum) - - -f = (t) -> -sol(first(t),idxs=4) -opt2 = optimize(f,0.0,22.0) - - -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([opt.minimizer],[opt.minimum],label="Local Min") -scatter!([opt2.minimizer],[-opt2.minimum],label="Local Max") - - -f = (t) -> -sol(first(t),idxs=4) -opt = optimize(f,[20.0],BFGS()) - - -import NLopt, ForwardDiff - -count = 0 # keep track of # function evaluations - -function g(t::Vector, grad::Vector) - if length(grad) > 0 - #use ForwardDiff for the gradients - grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t) - end - sol(first(t),idxs=4) -end -opt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1) -NLopt.lower_bounds!(opt, [0.0]) -NLopt.upper_bounds!(opt, [40.0]) -NLopt.xtol_rel!(opt,1e-8) -NLopt.min_objective!(opt, g) -(minf,minx,ret) = NLopt.optimize(opt,[20.0]) -println(minf," ",minx," ",ret) -NLopt.max_objective!(opt, g) -(maxf,maxx,ret) = NLopt.optimize(opt,[20.0]) -println(maxf," ",maxx," ",ret) - - -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([minx],[minf],label="Global Min") -scatter!([maxx],[maxf],label="Global Max") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/ode_extras/04-monte_carlo_parameter_estim.jl 
b/script/ode_extras/04-monte_carlo_parameter_estim.jl deleted file mode 100644 index 4148c5ca..00000000 --- a/script/ode_extras/04-monte_carlo_parameter_estim.jl +++ /dev/null @@ -1,74 +0,0 @@ - -using DifferentialEquations, DiffEqParamEstim, Plots, Optim - -# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions - -# Set up Lotka-Volterra system -function pf_func(du,u,p,t) - du[1] = p[1] * u[1] - p[2] * u[1]*u[2] - du[2] = -3 * u[2] + u[1]*u[2] -end -p = [1.5,1.0] -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p) - - -# Setting up to solve the problem N times (for the N different initial conditions) -N = 10; -initial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]] -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) - - -# Check above does what we want -sim = solve(monte_prob,Tsit5(),num_monte=N) -plot(sim) - - -# Generate a dataset from these runs -data_times = 0.0:0.1:10.0 -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -data = Array(sim) - - -# Building a loss function -losses = [L2Loss(data_times,data[:,:,i]) for i in 1:N] - - -loss(sim) = sum(losses[i](sim[i]) for i in 1:N) - - -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8]) -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -loss(sim) - - -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - saveat=data_times) - - -lower = zeros(2) -upper = fill(2.0,2) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) - - -result - - -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - abstol=1e-8,reltol=1e-8, - saveat=data_times) -result = optimize(obj, lower, upper, [1.3,0.9], 
Fminbox(BFGS())) - - -result - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/test.jl b/script/test.jl deleted file mode 100644 index ef9d4119..00000000 --- a/script/test.jl +++ /dev/null @@ -1,4 +0,0 @@ - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/type_handling/01-number_types.jl b/script/type_handling/01-number_types.jl deleted file mode 100644 index 9282ec72..00000000 --- a/script/type_handling/01-number_types.jl +++ /dev/null @@ -1,53 +0,0 @@ - -using DifferentialEquations -f = (u,p,t) -> (p*u) -prob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01); - - -prob = prob_ode_linear -sol =solve(prob,Tsit5()) -println(sol) - - -prob = ODEProblem(f,1/2,(0//1,1//1),101//100); -sol = solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol) - - -prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100); -sol =solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol[end]) - - -prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01)) -sol =solve(prob_ode_biglinear,Tsit5()) -println(sol[end]) - - -using DoubleFloats -prob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01)) -sol =solve(prob_ode_doublelinear,Tsit5()) -println(sol[end]) - - -using ArbNumerics -prob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01)) -sol =solve(prob_ode_arbfloatlinear,Tsit5()) -println(sol[end]) - - -using DecFP -prob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01)) -sol =solve(prob_ode_decfplinear,Tsit5()) -println(sol[end]); println(typeof(sol[end])) - - -using Decimals -prob_ode_decimallinear = ODEProblem(f,[decimal("1.0")]./[decimal("2.0")],(0//1,1//1),decimal(1.01)) -sol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails -println(sol[end]); println(typeof(sol[end])) - - 
-using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/type_handling/02-uncertainties.jl b/script/type_handling/02-uncertainties.jl deleted file mode 100644 index 76dcbcb6..00000000 --- a/script/type_handling/02-uncertainties.jl +++ /dev/null @@ -1,107 +0,0 @@ - -using Measurements - -5.23 ± 0.14 === 5.23 ± 0.14 - - -(5.23± 0.14) - (5.23 ± 0.14) - - -(5.23 ± 0.14) / (5.23 ± 0.14) - - -x = 5.23 ± 0.14 -x === x - - -x - x - - -x / x - - -using DifferentialEquations, Measurements, Plots - -# Half-life and mean lifetime of radiocarbon, in years -t_12 = 5730 ± 40 -τ = t_12 / log(2) - -#Setup -u₀ = 1 ± 0 -tspan = (0.0, 10000.0) - -#Define the problem -radioactivedecay(u,p,t) = - u / τ - -#Pass to solver -prob = ODEProblem(radioactivedecay, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-8) - -# Analytic solution -u = exp.(- sol.t / τ) - -plot(sol.t, sol.u, label = "Numerical", xlabel = "Years", ylabel = "Fraction of Carbon-14") -plot!(sol.t, u, label = "Analytic") - - -println("Quantity of carbon-14 after ", sol.t[11], " years:") -println("Numerical: ", sol[11]) -println("Analytic: ", u[11]) - - -using DifferentialEquations, Measurements, Plots - -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*θ -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -# Analytic solution -u = u₀[2] .* cos.(sqrt(g / L) .* sol.t) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -plot!(sol.t, u, label = "Analytic") - - -plot(sol.t, getindex.(sol.u, 2) .- u, label = "") - - -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 3 ± 0.02] 
# Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L) * sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/script/type_handling/03-unitful.jl b/script/type_handling/03-unitful.jl deleted file mode 100644 index 6a853650..00000000 --- a/script/type_handling/03-unitful.jl +++ /dev/null @@ -1,41 +0,0 @@ - -using Unitful -t = 1.0u"s" - - -t2 = 1.02u"s" -t+t2 - - -t*t2 - - -sqrt(t) - - -t + sqrt(t) - - -using DifferentialEquations -f = (y,p,t) -> 0.5*y -u0 = 1.5u"N" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) - - -f = (y,p,t) -> 0.5*y/3.0u"s" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) - - -print(sol[:]) - - -using Plots -gr() -plot(ustrip(sol.t),ustrip(sol[:]),lw=3) - - -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) - diff --git a/src/DiffEqTutorials.jl b/src/DiffEqTutorials.jl deleted file mode 100644 index 1407915d..00000000 --- a/src/DiffEqTutorials.jl +++ /dev/null @@ -1,118 +0,0 @@ -module DiffEqTutorials - -using Weave, Pkg, InteractiveUtils, IJulia - -repo_directory = joinpath(@__DIR__,"..") -cssfile = joinpath(@__DIR__, "..", "templates", "skeleton_css.css") -latexfile = joinpath(@__DIR__, "..", "templates", "julia_tex.tpl") - -function weave_file(folder,file,build_list=(:script,:html,:pdf,:notebook); kwargs...) 
- tmp = joinpath(repo_directory,"tutorials",folder,file) - args = Dict{Symbol,String}(:folder=>folder,:file=>file) - if :script ∈ build_list - println("Building Script") - dir = joinpath(repo_directory,"script",folder) - isdir(dir) || mkdir(dir) - args[:doctype] = "script" - tangle(tmp;out_path=dir) - end - if :html ∈ build_list - println("Building HTML") - dir = joinpath(repo_directory,"html",folder) - isdir(dir) || mkdir(dir) - args[:doctype] = "html" - weave(tmp,doctype = "md2html",out_path=dir,args=args; fig_ext=".svg", css=cssfile, kwargs...) - end - if :pdf ∈ build_list - println("Building PDF") - dir = joinpath(repo_directory,"pdf",folder) - isdir(dir) || mkdir(dir) - args[:doctype] = "pdf" - weave(tmp,doctype="md2pdf",out_path=dir,args=args; template=latexfile, kwargs...) - end - if :github ∈ build_list - println("Building Github Markdown") - dir = joinpath(repo_directory,"markdown",folder) - isdir(dir) || mkdir(dir) - args[:doctype] = "github" - weave(tmp,doctype = "github",out_path=dir,args=args; kwargs...) 
- end - if :notebook ∈ build_list - println("Building Notebook") - dir = joinpath(repo_directory,"notebook",folder) - isdir(dir) || mkdir(dir) - args[:doctype] = "notebook" - Weave.convert_doc(tmp,joinpath(dir,file[1:end-4]*".ipynb")) - end -end - -function weave_all() - for folder in readdir(joinpath(repo_directory,"tutorials")) - folder == "test.jmd" && continue - weave_folder(folder) - end -end - -function weave_folder(folder) - for file in readdir(joinpath(repo_directory,"tutorials",folder)) - println("Building $(joinpath(folder,file)))") - try - weave_file(folder,file) - catch - end - end -end - -function tutorial_footer(folder=nothing, file=nothing; remove_homedir=true) - display("text/markdown", """ - ## Appendix - - This tutorial is part of the DiffEqTutorials.jl repository, found at: - """) - if folder !== nothing && file !== nothing - display("text/markdown", """ - To locally run this tutorial, do the following commands: - ``` - using DiffEqTutorials - DiffEqTutorials.weave_file("$folder","$file") - ``` - """) - end - display("text/markdown", "Computer Information:") - vinfo = sprint(InteractiveUtils.versioninfo) - display("text/markdown", """ - ``` - $(vinfo) - ``` - """) - - ctx = Pkg.API.Context() - pkgs = Pkg.Display.status(Pkg.API.Context(), use_as_api=true); - projfile = ctx.env.project_file - remove_homedir && (projfile = replace(projfile, homedir() => "~")) - - display("text/markdown",""" - Package Information: - """) - - md = "" - md *= "```\nStatus `$(projfile)`\n" - - for pkg in pkgs - if !isnothing(pkg.old) && pkg.old.ver !== nothing - md *= "[$(string(pkg.uuid))] $(string(pkg.name)) $(string(pkg.old.ver))\n" - else - md *= "[$(string(pkg.uuid))] $(string(pkg.name))\n" - end - end - md *= "```" - display("text/markdown", md) -end - -function open_notebooks() - Base.eval(Main, Meta.parse("import IJulia")) - path = joinpath(repo_directory,"notebook") - IJulia.notebook(;dir=path) -end - -end diff --git a/src/SciMLTutorials.jl 
b/src/SciMLTutorials.jl new file mode 100644 index 00000000..7841c3bf --- /dev/null +++ b/src/SciMLTutorials.jl @@ -0,0 +1,140 @@ +module SciMLTutorials + +using Weave, Pkg, IJulia, InteractiveUtils, Markdown + +repo_directory = joinpath(@__DIR__, "..") +cssfile = joinpath(@__DIR__, "..", "templates", "skeleton_css.css") +latexfile = joinpath(@__DIR__, "..", "templates", "julia_tex.tpl") +default_builds = (:script, :github) + +function weave_file(folder, file, build_list = default_builds) + target = joinpath(folder, file) + @info("Weaving $(target)") + + if isfile(joinpath(folder, "Project.toml")) && build_list != (:notebook,) + @info("Instantiating", folder) + Pkg.activate(joinpath(folder)) + Pkg.instantiate() + Pkg.build() + + @info("Printing out `Pkg.status()`") + Pkg.status() + end + + args = Dict{Symbol, String}(:folder=>folder, :file=>file) + if :script ∈ build_list + println("Building Script") + dir = joinpath(repo_directory, "script", basename(folder)) + mkpath(dir) + tangle(target; out_path = dir) + end + if :html ∈ build_list + println("Building HTML") + dir = joinpath(repo_directory, "html", basename(folder)) + mkpath(dir) + weave(target, doctype = "md2html", out_path = dir, + args = args, css = cssfile, fig_ext = ".svg") + end + if :pdf ∈ build_list + println("Building PDF") + dir = joinpath(repo_directory, "pdf", basename(folder)) + mkpath(dir) + try + weave(target, doctype = "md2pdf", out_path = dir, + template = latexfile, args = args) + catch ex + @warn "PDF generation failed" exception=(ex, catch_backtrace()) + end + end + if :github ∈ build_list + println("Building Github Markdown") + dir = joinpath(repo_directory, "markdown", basename(folder)) + mkpath(dir) + weave(target, doctype = "github", out_path = dir, args = args) + end + if :notebook ∈ build_list + println("Building Notebook") + dir = joinpath(repo_directory, "notebook", basename(folder)) + mkpath(dir) + Weave.convert_doc(target, joinpath(dir, file[1:(end - 4)]*".ipynb")) + end +end + 
+function weave_all(build_list = default_builds) + for folder in readdir(joinpath(repo_directory, "tutorials")) + folder == "test.jmd" && continue + weave_folder(joinpath(repo_directory, "tutorials", folder), build_list) + end +end + +function weave_folder(folder, build_list = default_builds) + for file in readdir(joinpath(folder)) + # Skip non-`.jmd` files + if !endswith(file, ".jmd") + continue + end + + try + weave_file(folder, file, build_list) + catch e + @error(e) + end + end +end + +function tutorial_footer(folder = nothing, file = nothing) + display(md""" + ## Appendix + + These tutorials are a part of the SciMLTutorials.jl repository, found at: . + For more information on high-performance scientific machine learning, check out the SciML Open Source Software Organization . + + """) + if folder !== nothing && file !== nothing + display(Markdown.parse(""" + To locally run this tutorial, do the following commands: + ``` + using SciMLTutorials + SciMLTutorials.weave_file("$folder","$file") + ``` + """)) + end + display(md"Computer Information:") + vinfo = sprint(InteractiveUtils.versioninfo) + display(Markdown.parse(""" + ``` + $(vinfo) + ``` + """)) + + display(md""" + Package Information: + """) + + proj = sprint(io -> Pkg.status(io = io)) + mani = sprint(io -> Pkg.status(io = io, mode = Pkg.PKGMODE_MANIFEST)) + + md = """ + ``` + $(chomp(proj)) + ``` + + And the full manifest: + + ``` + $(chomp(mani)) + ``` + """ + display(Markdown.parse(md)) +end + +function open_notebooks() + Base.eval(Main, Meta.parse("import IJulia")) + weave_all((:notebook,)) + path = joinpath(repo_directory, "notebook") + newpath = joinpath(pwd(), "generated_notebooks") + mv(path, newpath) + IJulia.notebook(; dir = newpath) +end + +end diff --git a/test/runtests.jl b/test/runtests.jl index 63652463..8d7d6e65 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,2 +1,3 @@ -using DiffEqTutorials -DiffEqTutorials.weave_file(".","test.jmd") +using SciMLTutorials +tutorials_dir = 
joinpath(dirname(@__DIR__), "tutorials") +SciMLTutorials.weave_file(joinpath(tutorials_dir, "Testing"), "test.jmd") diff --git a/tutorials/Testing/Manifest.toml b/tutorials/Testing/Manifest.toml new file mode 100644 index 00000000..09c3a1aa --- /dev/null +++ b/tutorials/Testing/Manifest.toml @@ -0,0 +1,878 @@ +# This file is machine-generated - editing it directly is not advised + +[[Adapt]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "f1b523983a58802c4695851926203b36e28f09db" +uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +version = "3.3.0" + +[[ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" + +[[Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + +[[Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[Bzip2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "c3598e525718abcc440f69cc6d5f60dda0a1b61e" +uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0" +version = "1.0.6+5" + +[[Cairo_jll]] +deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "e2f47f6d8337369411569fd45ae5753ca10394c6" +uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" +version = "1.16.0+6" + +[[ColorSchemes]] +deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random", "StaticArrays"] +git-tree-sha1 = "c8fd01e4b736013bc61b704871d20503b33ea402" +uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4" +version = "3.12.1" + +[[ColorTypes]] +deps = ["FixedPointNumbers", "Random"] +git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597" +uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" +version = "0.11.0" + +[[Colors]] +deps = ["ColorTypes", "FixedPointNumbers", "Reexport"] +git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40" +uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" +version = "0.12.8" + +[[Compat]] +deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", 
"LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] +git-tree-sha1 = "e4e2b39db08f967cc1360951f01e8a75ec441cab" +uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" +version = "3.30.0" + +[[CompilerSupportLibraries_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" + +[[Conda]] +deps = ["JSON", "VersionParsing"] +git-tree-sha1 = "299304989a5e6473d985212c28928899c74e9421" +uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" +version = "1.5.2" + +[[Contour]] +deps = ["StaticArrays"] +git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7" +uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" +version = "0.5.7" + +[[DataAPI]] +git-tree-sha1 = "dfb3b7e89e395be1e25c2ad6d7690dc29cc53b1d" +uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" +version = "1.6.0" + +[[DataStructures]] +deps = ["Compat", "InteractiveUtils", "OrderedCollections"] +git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677" +uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +version = "0.18.9" + +[[DataValueInterfaces]] +git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" +uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" +version = "1.0.0" + +[[Dates]] +deps = ["Printf"] +uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" + +[[DelimitedFiles]] +deps = ["Mmap"] +uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" + +[[Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + +[[DocStringExtensions]] +deps = ["LibGit2", "Markdown", "Pkg", "Test"] +git-tree-sha1 = "9d4f64f79012636741cf01133158a54b24924c32" +uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +version = "0.8.4" + +[[Downloads]] +deps = ["ArgTools", "LibCURL", "NetworkOptions"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" + +[[EarCut_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = 
"92d8f9f208637e8d2d28c664051a00569c01493d" +uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5" +version = "2.1.5+1" + +[[Expat_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f" +uuid = "2e619515-83b5-522b-bb60-26c02a35a201" +version = "2.2.10+0" + +[[FFMPEG]] +deps = ["FFMPEG_jll", "x264_jll"] +git-tree-sha1 = "9a73ffdc375be61b0e4516d83d880b265366fe1f" +uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" +version = "0.4.0" + +[[FFMPEG_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "LibVPX_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] +git-tree-sha1 = "3cc57ad0a213808473eafef4845a74766242e05f" +uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5" +version = "4.3.1+4" + +[[FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" + +[[FixedPointNumbers]] +deps = ["Statistics"] +git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" +uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" +version = "0.8.4" + +[[Fontconfig_jll]] +deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "35895cf184ceaab11fd778b4590144034a167a2f" +uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" +version = "2.13.1+14" + +[[Formatting]] +deps = ["Printf"] +git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" +uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" +version = "0.4.2" + +[[FreeType2_jll]] +deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "cbd58c9deb1d304f5a245a0b7eb841a2560cfec6" +uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7" +version = "2.10.1+5" + +[[FriBidi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "0d20aed5b14dd4c9a2453c1b601d08e1149679cc" +uuid = "559328eb-81f9-559d-9380-de523a88c83c" +version = "1.0.5+6" + 
+[[GLFW_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] +git-tree-sha1 = "a199aefead29c3c2638c3571a9993b564109d45a" +uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" +version = "3.3.4+0" + +[[GR]] +deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test", "UUIDs"] +git-tree-sha1 = "011458b83178ac913dc4eb73b229af45bdde5d83" +uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" +version = "0.57.4" + +[[GR_jll]] +deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "90acee5c38f4933342fa9a3bbc483119d20e7033" +uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" +version = "0.57.2+0" + +[[GeometryBasics]] +deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"] +git-tree-sha1 = "4136b8a5668341e58398bb472754bff4ba0456ff" +uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326" +version = "0.3.12" + +[[Gettext_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"] +git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046" +uuid = "78b55507-aeef-58d4-861c-77aaff3498b1" +version = "0.21.0+0" + +[[Glib_jll]] +deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "47ce50b742921377301e15005c96e979574e130b" +uuid = "7746bdde-850d-59dc-9ae8-88ece973131d" +version = "2.68.1+0" + +[[Grisu]] +git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" +uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" +version = "1.0.2" + +[[HTTP]] +deps = ["Base64", "Dates", "IniFile", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] +git-tree-sha1 = 
"1fd26bc48f96adcdd8823f7fc300053faf3d7ba1" +uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" +version = "0.9.9" + +[[Highlights]] +deps = ["DocStringExtensions", "InteractiveUtils", "REPL"] +git-tree-sha1 = "f823a2d04fb233d52812c8024a6d46d9581904a4" +uuid = "eafb193a-b7ab-5a9e-9068-77385905fa72" +version = "0.4.5" + +[[IJulia]] +deps = ["Base64", "Conda", "Dates", "InteractiveUtils", "JSON", "Libdl", "Markdown", "MbedTLS", "Pkg", "Printf", "REPL", "Random", "SoftGlobalScope", "Test", "UUIDs", "ZMQ"] +git-tree-sha1 = "d8b9c31196e1dd92181cd0f5760ca2d2ffb4ac0f" +uuid = "7073ff75-c697-5162-941a-fcdaad2a7d2a" +version = "1.23.2" + +[[IniFile]] +deps = ["Test"] +git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8" +uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f" +version = "0.5.0" + +[[InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[IterTools]] +git-tree-sha1 = "05110a2ab1fc5f932622ffea2a003221f4782c18" +uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" +version = "1.3.0" + +[[IteratorInterfaceExtensions]] +git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" +uuid = "82899510-4779-5014-852e-03e436cf321d" +version = "1.0.0" + +[[JLLWrappers]] +deps = ["Preferences"] +git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e" +uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" +version = "1.3.0" + +[[JSON]] +deps = ["Dates", "Mmap", "Parsers", "Unicode"] +git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" +uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" +version = "0.21.1" + +[[JpegTurbo_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "9aff0587d9603ea0de2c6f6300d9f9492bbefbd3" +uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" +version = "2.0.1+3" + +[[LAME_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "df381151e871f41ee86cee4f5f6fd598b8a68826" +uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d" +version = "3.100.0+3" + +[[LZO_jll]] +deps = ["Artifacts", "JLLWrappers", 
"Libdl", "Pkg"] +git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6" +uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac" +version = "2.10.1+0" + +[[LaTeXStrings]] +git-tree-sha1 = "c7f1c695e06c01b95a67f0cd1d34994f3e7db104" +uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" +version = "1.2.1" + +[[Latexify]] +deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"] +git-tree-sha1 = "f77a16cb3804f4a74f57e5272a6a4a9a628577cb" +uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" +version = "0.15.5" + +[[LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" + +[[LibCURL_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" + +[[LibGit2]] +deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" + +[[LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" + +[[LibVPX_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "85fcc80c3052be96619affa2fe2e6d2da3908e11" +uuid = "dd192d2f-8180-539f-9fb4-cc70b1dcf69a" +version = "1.9.0+1" + +[[Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[Libffi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "761a393aeccd6aa92ec3515e428c26bf99575b3b" +uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490" +version = "3.2.2+0" + +[[Libgcrypt_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"] +git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae" +uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4" +version = "1.8.7+0" + +[[Libglvnd_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"] +git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf" +uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29" +version = "1.3.0+3" + +[[Libgpg_error_jll]] +deps = ["Artifacts", 
"JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9" +uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8" +version = "1.42.0+0" + +[[Libiconv_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "8d22e127ea9a0917bc98ebd3755c8bd31989381e" +uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" +version = "1.16.1+0" + +[[Libmount_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73" +uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9" +version = "2.35.0+0" + +[[Libtiff_jll]] +deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"] +git-tree-sha1 = "291dd857901f94d683973cdf679984cdf73b56d0" +uuid = "89763e89-9b03-5906-acba-b20f662cd828" +version = "4.1.0+2" + +[[Libuuid_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066" +uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700" +version = "2.36.0+0" + +[[LinearAlgebra]] +deps = ["Libdl"] +uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" + +[[Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[MacroTools]] +deps = ["Markdown", "Random"] +git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0" +uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +version = "0.5.6" + +[[Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[MbedTLS]] +deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"] +git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe" +uuid = "739be429-bea8-5141-9913-cc70e7f3736d" +version = "1.0.3" + +[[MbedTLS_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" + +[[Measures]] +git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f" +uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" +version = "0.3.1" + +[[Missings]] +deps = ["DataAPI"] +git-tree-sha1 = "4ea90bd5d3985ae1f9a908bd4500ae88921c5ce7" +uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" +version = 
"1.0.0" + +[[Mmap]] +uuid = "a63ad114-7e13-5084-954f-fe012c677804" + +[[MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" + +[[Mustache]] +deps = ["Printf", "Tables"] +git-tree-sha1 = "36995ef0d532fe08119d70b2365b7b03d4e00f48" +uuid = "ffc61752-8dc7-55ee-8c37-f3e9cdd09e70" +version = "1.0.10" + +[[NaNMath]] +git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb" +uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" +version = "0.3.5" + +[[NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" + +[[Ogg_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "a42c0f138b9ebe8b58eba2271c5053773bde52d0" +uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" +version = "1.3.4+2" + +[[OpenSSL_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "71bbbc616a1d710879f5a1021bcba65ffba6ce58" +uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" +version = "1.1.1+6" + +[[Opus_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "f9d57f4126c39565e05a2b0264df99f497fc6f37" +uuid = "91d4177d-7536-5919-b921-800302f37372" +version = "1.3.1+3" + +[[OrderedCollections]] +git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c" +uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" +version = "1.4.1" + +[[PCRE_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488" +uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc" +version = "8.44.0+0" + +[[Parsers]] +deps = ["Dates"] +git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "1.1.0" + +[[Pixman_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29" +uuid = "30392449-352a-5448-841d-b1acce4e97dc" +version = "0.40.1+0" + +[[Pkg]] +deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", 
"p7zip_jll"] +uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" + +[[PlotThemes]] +deps = ["PlotUtils", "Requires", "Statistics"] +git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d" +uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" +version = "2.0.1" + +[[PlotUtils]] +deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"] +git-tree-sha1 = "ae9a295ac761f64d8c2ec7f9f24d21eb4ffba34d" +uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" +version = "1.0.10" + +[[Plots]] +deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"] +git-tree-sha1 = "f3a57a5acc16a69c03539b3684354cbbbb72c9ad" +uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" +version = "1.15.2" + +[[Preferences]] +deps = ["TOML"] +git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.2.2" + +[[Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[Qt5Base_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"] +git-tree-sha1 = "16626cfabbf7206d60d84f2bf4725af7b37d4a77" +uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1" +version = "5.15.2+0" + +[[REPL]] +deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] +uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" + +[[Random]] +deps = ["Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[RecipesBase]] +git-tree-sha1 = "b3fb709f3c97bfc6e948be68beeecb55a0b340ae" +uuid = 
"3cdcf5f2-1ef4-517c-9805-6587b60abb01" +version = "1.1.1" + +[[RecipesPipeline]] +deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"] +git-tree-sha1 = "7a5026a6741c14147d1cb6daf2528a77ca28eb51" +uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" +version = "0.3.2" + +[[Reexport]] +git-tree-sha1 = "57d8440b0c7d98fc4f889e478e80f268d534c9d5" +uuid = "189a3867-3050-52da-a836-e630ba90ab69" +version = "1.0.0" + +[[Requires]] +deps = ["UUIDs"] +git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621" +uuid = "ae029012-a4dd-5104-9daa-d747884805df" +version = "1.1.3" + +[[SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" + +[[SciMLTutorials]] +deps = ["IJulia", "InteractiveUtils", "Pkg", "Plots", "Weave"] +git-tree-sha1 = "6d721be72323edd91679318c05aca8479bc7b20f" +uuid = "30cb0354-2223-46a9-baa0-41bdcfbe0178" +version = "0.9.0" + +[[Scratch]] +deps = ["Dates"] +git-tree-sha1 = "ad4b278adb62d185bbcb6864dc24959ab0627bf6" +uuid = "6c6a2e73-6563-6170-7368-637461726353" +version = "1.0.3" + +[[Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[SharedArrays]] +deps = ["Distributed", "Mmap", "Random", "Serialization"] +uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" + +[[Showoff]] +deps = ["Dates", "Grisu"] +git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de" +uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" +version = "1.0.3" + +[[Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[SoftGlobalScope]] +deps = ["REPL"] +git-tree-sha1 = "986ec2b6162ccb95de5892ed17832f95badf770c" +uuid = "b85f4697-e234-5449-a836-ec8e2f98b302" +version = "1.1.0" + +[[SortingAlgorithms]] +deps = ["DataStructures"] +git-tree-sha1 = "2ec1962eba973f383239da22e75218565c390a96" +uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" +version = "1.0.0" + +[[SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[StaticArrays]] +deps = ["LinearAlgebra", "Random", "Statistics"] +git-tree-sha1 = "c635017268fd51ed944ec429bcc4ad010bcea900" 
+uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "1.2.0" + +[[Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + +[[StatsAPI]] +git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510" +uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" +version = "1.0.0" + +[[StatsBase]] +deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] +git-tree-sha1 = "2f6792d523d7448bbe2fec99eca9218f06cc746d" +uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" +version = "0.33.8" + +[[StructArrays]] +deps = ["Adapt", "DataAPI", "Tables"] +git-tree-sha1 = "44b3afd37b17422a62aea25f04c1f7e09ce6b07f" +uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" +version = "0.5.1" + +[[TOML]] +deps = ["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" + +[[TableTraits]] +deps = ["IteratorInterfaceExtensions"] +git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39" +uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" +version = "1.0.1" + +[[Tables]] +deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"] +git-tree-sha1 = "c9d2d262e9a327be1f35844df25fe4561d258dc9" +uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" +version = "1.4.2" + +[[Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" + +[[Test]] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[URIs]] +git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355" +uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" +version = "1.3.0" + +[[UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" + +[[Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" + +[[VersionParsing]] +git-tree-sha1 = "80229be1f670524750d905f8fc8148e5a8c4537f" +uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" +version = "1.2.0" + +[[Wayland_jll]] +deps = ["Artifacts", 
"Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"] +git-tree-sha1 = "dc643a9b774da1c2781413fd7b6dcd2c56bb8056" +uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89" +version = "1.17.0+4" + +[[Wayland_protocols_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll"] +git-tree-sha1 = "2839f1c1296940218e35df0bbb220f2a79686670" +uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91" +version = "1.18.0+4" + +[[Weave]] +deps = ["Base64", "Dates", "Highlights", "JSON", "Markdown", "Mustache", "Pkg", "Printf", "REPL", "Requires", "Serialization", "YAML"] +git-tree-sha1 = "4afd286cd80d1c2c338f9a13356298feac7348d0" +uuid = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" +version = "0.10.8" + +[[XML2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "1acf5bdf07aa0907e0a37d3718bb88d4b687b74a" +uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" +version = "2.9.12+0" + +[[XSLT_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"] +git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a" +uuid = "aed1982a-8fda-507f-9586-7b0439959a61" +version = "1.1.34+0" + +[[Xorg_libX11_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] +git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527" +uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc" +version = "1.6.9+4" + +[[Xorg_libXau_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e" +uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec" +version = "1.0.9+4" + +[[Xorg_libXcursor_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"] +git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd" +uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724" +version = "1.2.0+4" + +[[Xorg_libXdmcp_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = 
"4fe47bd2247248125c428978740e18a681372dd4" +uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05" +version = "1.1.3+4" + +[[Xorg_libXext_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3" +uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3" +version = "1.3.4+4" + +[[Xorg_libXfixes_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4" +uuid = "d091e8ba-531a-589c-9de9-94069b037ed8" +version = "5.0.3+4" + +[[Xorg_libXi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"] +git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246" +uuid = "a51aa0fd-4e3c-5386-b890-e753decda492" +version = "1.7.10+4" + +[[Xorg_libXinerama_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"] +git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123" +uuid = "d1454406-59df-5ea1-beac-c340f2130bc3" +version = "1.1.4+4" + +[[Xorg_libXrandr_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"] +git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631" +uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484" +version = "1.5.2+4" + +[[Xorg_libXrender_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96" +uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" +version = "0.9.10+4" + +[[Xorg_libpthread_stubs_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb" +uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74" +version = "0.1.0+3" + +[[Xorg_libxcb_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] +git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6" +uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b" 
+version = "1.13.0+3" + +[[Xorg_libxkbfile_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2" +uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a" +version = "1.1.0+4" + +[[Xorg_xcb_util_image_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97" +uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b" +version = "0.4.0+1" + +[[Xorg_xcb_util_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"] +git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1" +uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5" +version = "0.4.0+1" + +[[Xorg_xcb_util_keysyms_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00" +uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7" +version = "0.4.0+1" + +[[Xorg_xcb_util_renderutil_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e" +uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e" +version = "0.3.9+1" + +[[Xorg_xcb_util_wm_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67" +uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361" +version = "0.4.1+1" + +[[Xorg_xkbcomp_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"] +git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b" +uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4" +version = "1.4.2+4" + +[[Xorg_xkeyboard_config_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"] +git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d" +uuid = "33bec58e-1273-512f-9401-5d533626f822" +version = "2.27.0+4" + +[[Xorg_xtrans_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = 
"79c31e7844f6ecf779705fbc12146eb190b7d845" +uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10" +version = "1.4.0+3" + +[[YAML]] +deps = ["Base64", "Dates", "Printf"] +git-tree-sha1 = "78c02bd295bbd0ca330f95e07ccdfcb69f6cbcd4" +uuid = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" +version = "0.4.6" + +[[ZMQ]] +deps = ["FileWatching", "Sockets", "ZeroMQ_jll"] +git-tree-sha1 = "fc68e8a3719166950a0f3e390a14c7302c48f8de" +uuid = "c2297ded-f4af-51ae-bb23-16f91089e4e1" +version = "1.2.1" + +[[ZeroMQ_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "libsodium_jll"] +git-tree-sha1 = "74a74a3896b63980734cc876da8a103454559fe8" +uuid = "8f1865be-045e-5c20-9c9f-bfbfb0764568" +version = "4.3.2+6" + +[[Zlib_jll]] +deps = ["Libdl"] +uuid = "83775a58-1f1d-513f-b197-d71354ab007a" + +[[Zstd_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6" +uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" +version = "1.5.0+0" + +[[libass_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "acc685bcf777b2202a904cdcb49ad34c2fa1880c" +uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" +version = "0.14.0+4" + +[[libfdk_aac_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "7a5780a0d9c6864184b3a2eeeb833a0c871f00ab" +uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280" +version = "0.1.6+4" + +[[libpng_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c" +uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f" +version = "1.6.38+0" + +[[libsodium_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "848ab3d00fe39d6fbc2a8641048f8f272af1c51e" +uuid = "a9144af2-ca23-56d9-984f-0d03f7b5ccf8" +version = "1.0.20+0" + +[[libvorbis_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"] +git-tree-sha1 = "fa14ac25af7a4b8a7f61b287a124df7aab601bcd" +uuid = 
"f27f6e37-5d2b-51aa-960f-b287f2bc3b7a" +version = "1.3.6+6" + +[[nghttp2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" + +[[p7zip_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" + +[[x264_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "d713c1ce4deac133e3334ee12f4adff07f81778f" +uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a" +version = "2020.7.14+2" + +[[x265_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "487da2f8f2f0c8ee0e83f39d13037d6bbf0a45ab" +uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76" +version = "3.0.0+3" + +[[xkbcommon_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"] +git-tree-sha1 = "ece2350174195bb31de1a63bea3a41ae1aa593b6" +uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd" +version = "0.9.1+5" diff --git a/tutorials/Testing/Project.toml b/tutorials/Testing/Project.toml new file mode 100644 index 00000000..9c4e0a35 --- /dev/null +++ b/tutorials/Testing/Project.toml @@ -0,0 +1,5 @@ +[deps] +SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" + +[compat] +SciMLTutorials = "0.9, 1" diff --git a/tutorials/Testing/test.jmd b/tutorials/Testing/test.jmd new file mode 100644 index 00000000..4a909381 --- /dev/null +++ b/tutorials/Testing/test.jmd @@ -0,0 +1,11 @@ +--- +title: Test +author: Chris Rackauckas +--- + +This is a test of the builder system. It often gets bumped manually. 
+ +```julia, echo = false, skip="notebook" +using SciMLTutorials +SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder], WEAVE_ARGS[:file]) +``` diff --git a/tutorials/advanced/01-beeler_reuter.jmd b/tutorials/advanced/01-beeler_reuter.jmd deleted file mode 100644 index 59c4ab01..00000000 --- a/tutorials/advanced/01-beeler_reuter.jmd +++ /dev/null @@ -1,661 +0,0 @@ ---- -title: An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model -author: Shahriar Iravanian ---- - -## Background - -[JuliaDiffEq](https://github.com/JuliaDiffEq) is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). *JuliaDiffEq* provides a large number of explicit and implicit solvers suited for different types of ODE problems. It is possible to reduce a system of partial differential equations into an ODE problem by employing the [method of lines (MOL)](https://en.wikipedia.org/wiki/Method_of_lines). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. [Solving Systems of Stochastic PDEs and using GPUs in Julia](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/) is a brief introduction to MOL and using GPUs to accelerate PDE solving in *JuliaDiffEq*. Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl) libraries to run the explicit part of the model on a GPU. 
- -Note that this tutorial does not use the [higher order IMEX methods built into DifferentialEquations.jl](http://docs.juliadiffeq.org/dev/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1) but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios. - -There are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic [Hodgkin-Huxley model](https://en.wikipedia.org/wiki/Hodgkin%E2%80%93Huxley_model) and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE, - -$$\partial V / \partial t = \nabla (D \nabla V) - \frac {I_\text{ion}} {C_m},$$ - -where $V$ is the transmembrane potential, $D$ is a diffusion tensor, $I_\text{ion}$ is the sum of the transmembrane currents and is calculated from the ODEs, and $C_m$ is the membrane capacitance and is usually assumed to be constant. Here we model a uniform and isotropic medium. Therefore, the model can be simplified to, - -$$\partial V / \partial t = D \Delta{V} - \frac {I_\text{ion}} {C_m},$$ - -where $D$ is now a scalar. By nature, these models have to deal with different time scales and are therefore classified as *stiff*. Commonly, they are solved using the explicit Euler method, usually with a closed form for the integration of the gating variables (the Rush-Larsen method, see below). We can also solve these problems using implicit or semi-implicit PDE solvers (e.g., the [Crank-Nicholson method](https://en.wikipedia.org/wiki/Crank%E2%80%93Nicolson_method) combined with an iterative solver). 
Higher order explicit methods such as Runge-Kutta and linear multi-step methods cannot overcome the stiffness and are not particularly helpful. - -In this tutorial, we first develop a CPU-only IMEX solver and then show how to move the explicit part to a GPU. - -### The Beeler-Reuter Model - -We have chosen the [Beeler-Reuter ventricular ionic model](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1283659/) as our example. It is a classic model first described in 1977 and is used as a base for many other ionic models. It has eight state variables, which makes it complicated enough to be interesting without obscuring the main points of the exercise. The eight state variables are: the transmembrane potential ($V$), sodium-channel activation and inactivation gates ($m$ and $h$, similar to the Hodgkin-Huxley model), with an additional slow inactivation gate ($j$), calcium-channel activation and deactivations gates ($d$ and $f$), a time-dependent inward-rectifying potassium current gate ($x_1$), and intracellular calcium concentration ($c$). There are four currents: a sodium current ($i_{Na}$), a calcium current ($i_{Ca}$), and two potassium currents, one time-dependent ($i_{x_1}$) and one background time-independent ($i_{K_1}$). - -## CPU-Only Beeler-Reuter Solver - -Let's start by developing a CPU only IMEX solver. The main idea is to use the *DifferentialEquations* framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. If no analytical approximation was known for the explicit part, one could use methods from [this list](http://docs.juliadiffeq.org/dev/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1). 
- -First, we define the model constants: - -```julia -const v0 = -84.624 -const v1 = 10.0 -const C_K1 = 1.0f0 -const C_x1 = 1.0f0 -const C_Na = 1.0f0 -const C_s = 1.0f0 -const D_Ca = 0.0f0 -const D_Na = 0.0f0 -const g_s = 0.09f0 -const g_Na = 4.0f0 -const g_NaC = 0.005f0 -const ENa = 50.0f0 + D_Na -const γ = 0.5f0 -const C_m = 1.0f0 -``` - -Note that the constants are defined as `Float32` and not `Float64`. The reason is that most GPUs have many more single precision cores than double precision ones. To ensure uniformity between CPU and GPU, we also code most states variables as `Float32` except for the transmembrane potential, which is solved by an implicit solver provided by the Sundial library and needs to be `Float64`. - -### The State Structure - -Next, we define a struct to contain our state. `BeelerReuterCpu` is a functor and we will define a deriv function as its associated function. - -```julia -mutable struct BeelerReuterCpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - C::Array{Float32, 2} # intracellular calcium concentration - M::Array{Float32, 2} # sodium current activation gate (m) - H::Array{Float32, 2} # sodium current inactivation gate (h) - J::Array{Float32, 2} # sodium current slow inactivaiton gate (j) - D::Array{Float32, 2} # calcium current activaiton gate (d) - F::Array{Float32, 2} # calcium current inactivation gate (f) - XI::Array{Float32, 2} # inward-rectifying potassium current (iK1) - - Δu::Array{Float64, 2} # place-holder for the Laplacian - - function BeelerReuterCpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.C = fill(0.0001f0, (ny,nx)) - self.M = fill(0.01f0, (ny,nx)) - self.H = fill(0.988f0, (ny,nx)) - self.J = fill(0.975f0, (ny,nx)) - self.D = fill(0.003f0, (ny,nx)) - self.F = fill(0.994f0, (ny,nx)) - self.XI = fill(0.0001f0, (ny,nx)) - - self.Δu = zeros(ny,nx) - - return self - 
end -end -``` - -### Laplacian - -The finite-difference Laplacian is calculated in-place by a 5-point stencil. The Neumann boundary condition is enforced. Note that we could have also used [DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to automate this step. - -```julia -# 5-point stencil -function laplacian(Δu, u) - n1, n2 = size(u) - - # internal nodes - for j = 2:n2-1 - for i = 2:n1-1 - @inbounds Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j] - end - end - - # left/right edges - for i = 2:n1-1 - @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1] - @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2] - end - - # top/bottom edges - for j = 2:n2-1 - @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j] - @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j] - end - - # corners - @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1] - @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1] - @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2] - @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2] -end -``` - -### The Rush-Larsen Method - -We use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the [IMEX solvers documentation](http://docs.juliadiffeq.org/dev/solvers/split_ode_solve.html#Implicit-Explicit-%28IMEX%29-ODE-1). While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest. 
- -The [Rush-Larsen](https://ieeexplore.ieee.org/document/4122859/) method replaces the explicit Euler integration for the gating variables with direct integration. The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs, - -$$\frac{dg}{dt} = \alpha(V) (1 - g) - \beta(V) g$$ - -where $g$ is a generic gating variable, ranging from 0 to 1, and $\alpha$ and $\beta$ are reaction rates. This equation can be written as, - -$$\frac{dg}{dt} = (g_{\infty} - g) / \tau_g,$$ - -where $g_\infty$ and $\tau_g$ are - -$$g_{\infty} = \frac{\alpha}{(\alpha + \beta)},$$ - -and, - -$$\tau_g = \frac{1}{(\alpha + \beta)}.$$ - -Assuing that $g_\infty$ and $\tau_g$ are constant for the duration of a single time step ($\Delta{t}$), which is a reasonable assumption for most cardiac models, we can integrate directly to have, - -$$g(t + \Delta{t}) = g_{\infty} - \left(g_{\infty} - g(\Delta{t})\right)\,e^{-\Delta{t}/\tau_g}.$$ - -This is the Rush-Larsen technique. Note that as $\Delta{t} \rightarrow 0$, this equations morphs into the explicit Euler formula, - -$$g(t + \Delta{t}) = g(t) + \Delta{t}\frac{dg}{dt}.$$ - -`rush_larsen` is a helper function that use the Rush-Larsen method to integrate the gating variables. - -```julia -@inline function rush_larsen(g, α, β, Δt) - inf = α/(α+β) - τ = 1f0 / (α+β) - return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0) -end -``` - -The gating variables are updated as below. The details of how to calculate $\alpha$ and $\beta$ are based on the Beeler-Reuter model and not of direct interest to this tutorial. - -```julia -function update_M_cpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 
10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * exp(-0.056f0*(v+72.0f0))) - return rush_larsen(g, α, β, Δt) -end - -function update_H_cpu(g, v, Δt) - α = 0.126f0 * exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_J_cpu(g, v, Δt) - α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_D_cpu(g, v, Δt) - α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_F_cpu(g, v, Δt) - α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_XI_cpu(g, v, Δt) - α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end -``` - -The intracelleular calcium is not technically a gating variable, but we can use a similar explicit exponential integrator for it. - -```julia -function update_C_cpu(g, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - g) - τ = 1f0 / 0.07f0 - return g + (g - inf) * expm1(-Δt/τ) -end -``` - -### Implicit Solver - -Now, it is time to define the derivative function as an associated function of **BeelerReuterCpu**. We plan to use the CVODE_BDF solver as our implicit portion. Similar to other iterative methods, it calls the deriv function with the same $t$ multiple times. 
For example, these are consecutive $t$s from a representative run: - -0.86830 -0.86830 -0.85485 -0.85485 -0.85485 -0.86359 -0.86359 -0.86359 -0.87233 -0.87233 -0.87233 -0.88598 -... - -Here, every time step is called three times. We distinguish between two types of calls to the deriv function. When $t$ changes, the gating variables are updated by calling `update_gates_cpu`: - -```julia -function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt) - let Δt = Float32(Δt) - n1, n2 = size(u) - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - XI[i,j] = update_XI_cpu(XI[i,j], v, Δt) - M[i,j] = update_M_cpu(M[i,j], v, Δt) - H[i,j] = update_H_cpu(H[i,j], v, Δt) - J[i,j] = update_J_cpu(J[i,j], v, Δt) - D[i,j] = update_D_cpu(D[i,j], v, Δt) - F[i,j] = update_F_cpu(F[i,j], v, Δt) - - C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - end - end -end -``` - -On the other hand, du is updated at each time step, since it is independent of $\Delta{t}$. - -```julia -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = exp(0.04f0*(v+85f0)) - eb = exp(0.08f0*(v+53f0)) - ec = exp(0.04f0*(v+53f0)) - ed = exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 
25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = exp(0.04f0*(v+77f0)) - eb = exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end - -function update_du_cpu(du, u, XI, M, H, J, D, F, C) - n1, n2 = size(u) - - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - end - end -end -``` - -Finally, we put everything together is our deriv function, which is a call on `BeelerReuterCpu`. - -```julia -function (f::BeelerReuterCpu)(du, u, p, t) - Δt = t - f.t - - if Δt != 0 || t == 0 - update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt) - f.t = t - end - - laplacian(f.Δu, u) - - # calculate the reaction portion - update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δu -end -``` - -### Results - -Time to test! We need to define the starting transmembrane potential with the help of global constants **v0** and **v1**, which represent the resting and activated potentials. - -```julia -const N = 192; -u0 = fill(v0, (N, N)); -u0[90:102,90:102] .= v1; # a small square in the middle of the domain -``` - -The initial condition is a small square in the middle of the domain. 
- -```julia -using Plots -heatmap(u0) -``` - -Next, the problem is defined: - -```julia -using DifferentialEquations, Sundials - -deriv_cpu = BeelerReuterCpu(u0, 1.0); -prob = ODEProblem(deriv_cpu, u0, (0.0, 50.0)); -``` - -For stiff reaction-diffusion equations, CVODE_BDF from Sundial library is an excellent solver. - -```julia -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); -``` - -```julia -heatmap(sol.u[end]) -``` - -## CPU/GPU Beeler-Reuter Solver - -GPUs are great for embarrassingly parallel problems but not so much for highly coupled models. We plan to keep the implicit part on CPU and run the decoupled explicit code on a GPU with the help of the CUDAnative library. - -### GPUs and CUDA - -It this section, we present a brief summary of how GPUs (specifically NVIDIA GPUs) work and how to program them using the Julia CUDA interface. The readers who are familiar with these basic concepts may skip this section. - -Let's start by looking at the hardware of a typical high-end GPU, GTX 1080. It has four Graphics Processing Clusters (equivalent to a discrete CPU), each harboring five Streaming Multiprocessor (similar to a CPU core). Each SM has 128 single-precision CUDA cores. Therefore, GTX 1080 has a total of 4 x 5 x 128 = 2560 CUDA cores. The maximum theoretical throughput for a GTX 1080 is reported as 8.87 TFLOPS. This figure is calculated for a boost clock frequency of 1.733 MHz as 2 x 2560 x 1.733 MHz = 8.87 TFLOPS. The factor 2 is included because two single floating point operations, a multiplication and an addition, can be done in a clock cycle as part of a fused-multiply-addition FMA operation. GTX 1080 also has 8192 MB of global memory accessible to all the cores (in addition to local and shared memory on each SM). - -A typical CUDA application has the following flow: - -1. Define and initialize the problem domain tensors (multi-dimensional arrays) in CPU memory. -2. Allocate corresponding tensors in the GPU global memory. -3. 
Transfer the input tensors from CPU to the corresponding GPU tensors. -4. Invoke CUDA kernels (i.e., the GPU functions callable from CPU) that operate on the GPU tensors. -5. Transfer the result tensors from GPU back to CPU. -6. Process tensors on CPU. -7. Repeat steps 3-6 as needed. - -Some libraries, such as [ArrayFire](https://github.com/arrayfire/arrayfire), hide the complexicities of steps 2-5 behind a higher level of abstraction. However, here we take a lower level route. By using [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl), we achieve a finer-grained control and higher performance. In return, we need to implement each step manually. - -*CuArray* is a thin abstraction layer over the CUDA API and allows us to define GPU-side tensors and copy data to and from them but does not provide for operations on tensors. *CUDAnative* is a compiler that translates Julia functions designated as CUDA kernels into ptx (a high-level CUDA assembly language). - -### The CUDA Code - -The key to fast CUDA programs is to minimize CPU/GPU memory transfers and global memory accesses. The implicit solver is currently CPU only, but it only needs access to the transmembrane potential. The rest of state variables reside on the GPU memory. - -We modify ``BeelerReuterCpu`` into ``BeelerReuterGpu`` by defining the state variables as *CuArray*s instead of standard Julia *Array*s. The name of each variable defined on GPU is prefixed by *d_* for clarity. Note that $\Delta{v}$ is a temporary storage for the Laplacian and stays on the CPU side. 
- -```julia -using CUDAnative, CuArrays - -mutable struct BeelerReuterGpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - d_C::CuArray{Float32, 2} # intracellular calcium concentration - d_M::CuArray{Float32, 2} # sodium current activation gate (m) - d_H::CuArray{Float32, 2} # sodium current inactivation gate (h) - d_J::CuArray{Float32, 2} # sodium current slow inactivaiton gate (j) - d_D::CuArray{Float32, 2} # calcium current activaiton gate (d) - d_F::CuArray{Float32, 2} # calcium current inactivation gate (f) - d_XI::CuArray{Float32, 2} # inward-rectifying potassium current (iK1) - - d_u::CuArray{Float64, 2} # place-holder for u in the device memory - d_du::CuArray{Float64, 2} # place-holder for d_u in the device memory - - Δv::Array{Float64, 2} # place-holder for voltage gradient - - function BeelerReuterGpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - @assert (nx % 16 == 0) && (ny % 16 == 0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.d_C = CuArray(fill(0.0001f0, (ny,nx))) - self.d_M = CuArray(fill(0.01f0, (ny,nx))) - self.d_H = CuArray(fill(0.988f0, (ny,nx))) - self.d_J = CuArray(fill(0.975f0, (ny,nx))) - self.d_D = CuArray(fill(0.003f0, (ny,nx))) - self.d_F = CuArray(fill(0.994f0, (ny,nx))) - self.d_XI = CuArray(fill(0.0001f0, (ny,nx))) - - self.d_u = CuArray(u0) - self.d_du = CuArray(zeros(ny,nx)) - - self.Δv = zeros(ny,nx) - - return self - end -end -``` - -The Laplacian function remains unchanged. The main change to the explicit gating solvers is that *exp* and *expm1* functions are prefixed by *CUDAnative.*. This is a technical nuisance that will hopefully be resolved in future. 
- -```julia -function rush_larsen_gpu(g, α, β, Δt) - inf = α/(α+β) - τ = 1.0/(α+β) - return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0) -end - -function update_M_gpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0))) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_H_gpu(g, v, Δt) - α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_J_gpu(g, v, Δt) - α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_D_gpu(g, v, Δt) - α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_F_gpu(g, v, Δt) - α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_XI_gpu(g, v, Δt) - α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_C_gpu(c, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - c) - τ = 1f0 / 0.07f0 - return c + (c - inf) * CUDAnative.expm1(-Δt/τ) -end -``` - -Similarly, we modify the 
functions to calculate the individual currents by adding the CUDAnative prefix. - -```julia -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = CUDAnative.exp(0.04f0*(v+85f0)) - eb = CUDAnative.exp(0.08f0*(v+53f0)) - ec = CUDAnative.exp(0.04f0*(v+53f0)) - ed = CUDAnative.exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = CUDAnative.exp(0.04f0*(v+77f0)) - eb = CUDAnative.exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end -``` - -### CUDA Kernels - -A CUDA program does not directly deal with GPCs and SMs. The logical view of a CUDA program is in terms of *blocks* and *threads*. We have to specify the number of blocks and threads when running a CUDA *kernel*. Each thread runs on a single CUDA core. Threads are logically bundled into blocks, which are in turn specified on a grid. The grid stands for the entirety of the domain of interest. - -Each thread can find its logical coordinate by using a few pre-defined indexing variables (*threadIdx*, *blockIdx*, *blockDim* and *gridDim*) in C/C++ and the corresponding functions (e.g., `threadIdx()`) in Julia. These variables and functions are defined automatically for each thread and may return a different value depending on the calling thread. 
The return value of these functions is a 1, 2, or 3 dimensional structure whose elements can be accessed as `.x`, `.y`, and `.z` (for a 1-dimensional case, `.x` reports the actual index and `.y` and `.z` simply return 1). For example, if we deploy a kernel in 128 blocks and with 256 threads per block, each thread will see - -``` - gridDim.x = 128; - blockDim.x = 256; -``` - -while `blockIdx.x` ranges from 0 to 127 in C/C++ and 1 to 128 in Julia. Similarly, `threadIdx.x` will be between 0 and 255 in C/C++ (of course, in Julia the range will be 1 to 256). - -A C/C++ thread can calculate its index as - -``` - int idx = blockDim.x * blockIdx.x + threadIdx.x; -``` - -In Julia, we have to take into account base 1. Therefore, we use the following formula - -``` - idx = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x -``` - -A CUDA programmer is free to interpret the calculated index however it fits the application, but in practice, it is usually interpreted as an index into input tensors. - -In the GPU version of the solver, each thread works on a single element of the medium, indexed by an (x,y) pair. -`update_gates_gpu` and `update_du_gpu` are very similar to their CPU counterparts but are in fact CUDA kernels where the *for* loops are replaced with CUDA specific indexing. Note that CUDA kernels cannot return a value; hence, *nothing* at the end. 
- -```julia -function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - let Δt = Float32(Δt) - XI[i,j] = update_XI_gpu(XI[i,j], v, Δt) - M[i,j] = update_M_gpu(M[i,j], v, Δt) - H[i,j] = update_H_gpu(H[i,j], v, Δt) - J[i,j] = update_J_gpu(J[i,j], v, Δt) - D[i,j] = update_D_gpu(D[i,j], v, Δt) - F[i,j] = update_F_gpu(F[i,j], v, Δt) - - C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - nothing -end - -function update_du_gpu(du, u, XI, M, H, J, D, F, C) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - nothing -end -``` - -### Implicit Solver - -Finally, the deriv function is modified to copy *u* to GPU and copy *du* back and to invoke CUDA kernels. - -```julia -function (f::BeelerReuterGpu)(du, u, p, t) - L = 16 # block size - Δt = t - f.t - copyto!(f.d_u, u) - ny, nx = size(u) - - if Δt != 0 || t == 0 - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu( - f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt) - f.t = t - end - - laplacian(f.Δv, u) - - # calculate the reaction portion - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu( - f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C) - - copyto!(du, f.d_du) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δv -end -``` - -Ready to test! 
- -```julia -using DifferentialEquations, Sundials - -deriv_gpu = BeelerReuterGpu(u0, 1.0); -prob = ODEProblem(deriv_gpu, u0, (0.0, 50.0)); -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); -``` - -```julia -heatmap(sol.u[end]) -``` - -## Summary - -We achieve around a 6x speedup with running the explicit portion of our IMEX solver on a GPU. The major bottleneck of this technique is the communication between CPU and GPU. In its current form, not all of the internals of the method utilize GPU acceleration. In particular, the implicit equations solved by GMRES are performed on the CPU. This partial CPU nature also increases the amount of data transfer that is required between the GPU and CPU (performed every f call). Compiling the full ODE solver to the GPU would solve both of these issues and potentially give a much larger speedup. [JuliaDiffEq developers are currently working on solutions to alleviate these issues](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/), but these will only be compatible with native Julia solvers (and not Sundials). - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/advanced/02-advanced_ODE_solving.jmd b/tutorials/advanced/02-advanced_ODE_solving.jmd deleted file mode 100644 index 44090834..00000000 --- a/tutorials/advanced/02-advanced_ODE_solving.jmd +++ /dev/null @@ -1,506 +0,0 @@ ---- -title: Solving Stiff Equations -author: Chris Rackauckas ---- - -This tutorial is for getting into the extra features for solving stiff ordinary -differential equations in an efficient manner. Solving stiff ordinary -differential equations requires specializing the linear solver on properties of -the Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2) -back-solves. Note that these same functions and controls also extend to stiff -SDEs, DDEs, DAEs, etc. 
- -## Code Optimization for Differential Equations - -### Writing Efficient Code - -For a detailed tutorial on how to optimize one's DifferentialEquations.jl code, -please see the -[Optimizing DiffEq Code tutorial](http://tutorials.juliadiffeq.org/html/introduction/03-optimizing_diffeq_code.html). - -### Choosing a Good Solver - -Choosing a good solver is required for getting top notch speed. General -recommendations can be found on the solver page (for example, the -[ODE Solver Recommendations](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html)). -The current recommendations can be simplified to a Rosenbrock method -(`Rosenbrock23` or `Rodas5`) for smaller (<50 ODEs) problems, ESDIRK methods -for slightly larger (`TRBDF2` or `KenCarp4` for <2000 ODEs), and Sundials -`CVODE_BDF` for even larger problems. `lsoda` from -[LSODA.jl](https://github.com/rveltz/LSODA.jl) is generally worth a try. - -More details on the solver to choose can be found by benchmarking. See the -[DiffEqBenchmarks](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) to -compare many solvers on many problems. - -### Check Out the Speed FAQ - -See [this FAQ](http://docs.juliadiffeq.org/dev/basics/faq.html#Performance-1) -for information on common pitfalls and how to improve performance. - -### Setting Up Your Julia Installation for Speed - -Julia uses an underlying BLAS implementation for its matrix multiplications -and factorizations. This library is automatically multithreaded and accelerates -the internal linear algebra of DifferentialEquations.jl. However, for optimality, -you should make sure that the number of BLAS threads that you are using matches -the number of physical cores and not the number of logical cores. See -[this issue for more details](https://github.com/JuliaLang/julia/issues/33409). 
- -To check the number of BLAS threads, use: - -```julia -ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ()) -``` - -If I want to set this directly to 4 threads, I would use: - -```julia -using LinearAlgebra -LinearAlgebra.BLAS.set_num_threads(4) -``` - -Additionally, in some cases Intel's MKL might be a faster BLAS than the standard -BLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you -can use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) which will accelerate -the linear algebra routines. Please see the package for the limitations. - -### Use Accelerator Hardware - -When possible, use GPUs. If your ODE system is small and you need to solve it -with very many different parameters, see the -[ensembles interface](http://docs.juliadiffeq.org/dev/features/ensemble.html) -and [DiffEqGPU.jl](https://github.com/JuliaDiffEq/DiffEqGPU.jl). If your problem -is large, consider using a [CuArray](https://github.com/JuliaGPU/CuArrays.jl) -for the state to allow for GPU-parallelism of the internal linear algebra. - -## Speeding Up Jacobian Calculations - -When one is using an implicit or semi-implicit differential equation solver, -the Jacobian must be built at many iterations and this can be one of the most -expensive steps. There are two pieces that must be optimized in order to reach -maximal efficiency when solving stiff equations: the sparsity pattern and the -construction of the Jacobian. The construction is filling the matrix -`J` with values, while the sparsity pattern is what `J` to use. - -The sparsity pattern is given by a prototype matrix, the `jac_prototype`, which -will be copied to be used as `J`. The default is for `J` to be a `Matrix`, -i.e. a dense matrix. However, if you know the sparsity of your problem, then -you can pass a different matrix type. For example, a `SparseMatrixCSC` will -give a sparse matrix. 
Additionally, structured matrix types like `Tridiagonal`, -`BandedMatrix` (from -[BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)), -`BlockBandedMatrix` (from -[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)), -and more can be given. DifferentialEquations.jl will internally use this matrix -type, making the factorizations faster by utilizing the specialized forms. - -For the construction, there are 3 ways to fill `J`: - -- The default, which uses normal finite/automatic differentiation -- A function `jac(J,u,p,t)` which directly computes the values of `J` -- A `colorvec` which defines a sparse differentiation scheme. - -We will now showcase how to make use of this functionality with growing complexity. - -### Declaring Jacobian Functions - -Let's solve the Robertson equations: - -$$\begin{align} -dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\ -dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ -dy_3 &= 3*10^7 y_{2}^2 \\ -\end{align}$$ - -In order to reduce the Jacobian construction cost, one can describe a Jacobian -function by using the `jac` argument for the `ODEFunction`. First, let's do -a standard `ODEProblem`: - -```julia -using DifferentialEquations -function rober(du,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - du[1] = -k₁*y₁+k₃*y₂*y₃ - du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ - du[3] = k₂*y₂^2 - nothing -end -prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -sol = solve(prob,Rosenbrock23()) - -using Plots -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -```julia -using BenchmarkTools -@btime solve(prob) -``` - -Now we want to add the Jacobian. First we have to derive the Jacobian -$\frac{df_i}{du_j}$ which is `J[i,j]`. 
From this we get: - -```julia -function rober_jac(J,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - J[1,1] = k₁ * -1 - J[2,1] = k₁ - J[3,1] = 0 - J[1,2] = y₃ * k₃ - J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1 - J[3,2] = y₂ * 2 * k₂ - J[1,3] = k₃ * y₂ - J[2,3] = k₃ * y₂ * -1 - J[3,3] = 0 - nothing -end -f = ODEFunction(rober, jac=rober_jac) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) - -@btime solve(prob_jac) -``` - -### Automatic Derivation of Jacobian Functions - -But that was hard! If you want to take the symbolic Jacobian of numerical -code, we can make use of [ModelingToolkit.jl](https://github.com/JuliaDiffEq/ModelingToolkit.jl) -to symbolicify the numerical code and do the symbolic calculation and return -the Julia code for this. - -```julia -using ModelingToolkit -de = modelingtoolkitize(prob) -ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place -``` - -which outputs: - -```julia;eval=false -:((##MTIIPVar#376, u, p, t)->begin - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =# - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =# - let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3]) - ##MTIIPVar#376[1] = α₁ * -1 - ##MTIIPVar#376[2] = α₁ - ##MTIIPVar#376[3] = 0 - ##MTIIPVar#376[4] = x₃ * α₃ - ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1 - ##MTIIPVar#376[6] = x₂ * 2 * α₂ - ##MTIIPVar#376[7] = α₃ * x₂ - ##MTIIPVar#376[8] = α₃ * x₂ * -1 - ##MTIIPVar#376[9] = 0 - end - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =# - nothing - end) -``` - -Now let's use that to give the analytical solution Jacobian: - -```julia -jac = eval(ModelingToolkit.generate_jacobian(de...)[2]) -f = ODEFunction(rober, jac=jac) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -``` - -### Declaring a Sparse Jacobian - -Jacobian sparsity is declared by the `jac_prototype` argument in the `ODEFunction`. 
-Note that you should only do this if the sparsity is high, for example, 0.1% -of the matrix is non-zeros, otherwise the overhead of sparse matrices can be higher -than the gains from sparse differentiation! - -But as a demonstration, let's build a sparse matrix for the Rober problem. We -can do this by gathering the `I` and `J` pairs for the non-zero components, like: - -```julia -I = [1,2,1,2,3,1,2] -J = [1,1,2,2,2,3,3] -using SparseArrays -jac_prototype = sparse(I,J,1.0) -``` - -Now this is the sparse matrix prototype that we want to use in our solver, which -we then pass like: - -```julia -f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -``` - -### Automatic Sparsity Detection - -One of the useful companion tools for DifferentialEquations.jl is -[SparsityDetection.jl](https://github.com/JuliaDiffEq/SparsityDetection.jl). -This allows for automatic declaration of Jacobian sparsity types. To see this -in action, let's look at the 2-dimensional Brusselator equation: - -```julia -const N = 32 -const xyd_brusselator = range(0,stop=1,length=N) -brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5. -limit(a, N) = a == N+1 ? 1 : a == 0 ? 
N : a -function brusselator_2d_loop(du, u, p, t) - A, B, alpha, dx = p - alpha = alpha/dx^2 - @inbounds for I in CartesianIndices((N, N)) - i, j = Tuple(I) - x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] - ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N) - du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) + - B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) - du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) + - A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] - end -end -p = (3.4, 1., 10., step(xyd_brusselator)) -``` - -Given this setup, we can give and example `input` and `output` and call `sparsity!` -on our function with the example arguments and it will kick out a sparse matrix -with our pattern, that we can turn into our `jac_prototype`. - -```julia -using SparsityDetection, SparseArrays -input = rand(32,32,2) -output = similar(input) -sparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0) -jac_sparsity = Float64.(sparse(sparsity_pattern)) -``` - -Let's double check what our sparsity pattern looks like: - -```julia -using Plots -spy(jac_sparsity,markersize=1,colorbar=false,color=:deep) -``` - -That's neat, and would be tedius to build by hand! 
Now we just pass it to the -`ODEFunction` like as before: - -```julia -f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity) -``` - -Build the `ODEProblem`: - -```julia -function init_brusselator_2d(xyd) - N = length(xyd) - u = zeros(N, N, 2) - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - u[I,1] = 22*(y*(1-y))^(3/2) - u[I,2] = 27*(x*(1-x))^(3/2) - end - u -end -u0 = init_brusselator_2d(xyd_brusselator) -prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop, - u0,(0.,11.5),p) - -prob_ode_brusselator_2d_sparse = ODEProblem(f, - u0,(0.,11.5),p) -``` - -Now let's see how the version with sparsity compares to the version without: - -```julia -@btime solve(prob_ode_brusselator_2d,save_everystep=false) -@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) -``` - -### Declaring Color Vectors for Fast Construction - -If you cannot directly define a Jacobian function, you can use the `colorvec` -to speed up the Jacobian construction. What the `colorvec` does is allows for -calculating multiple columns of a Jacobian simultaniously by using the sparsity -pattern. An explanation of matrix coloring can be found in the -[MIT 18.337 Lecture Notes](https://mitmath.github.io/18337/lecture9/stiff_odes). - -To perform general matrix coloring, we can use -[SparseDiffTools.jl](https://github.com/JuliaDiffEq/SparseDiffTools.jl). For -example, for the Brusselator equation: - -```julia -using SparseDiffTools -colorvec = matrix_colors(jac_sparsity) -@show maximum(colorvec) -``` - -This means that we can now calculate the Jacobian in 12 function calls. This is -a nice reduction from 2048 using only automated tooling! 
To now make use of this -inside of the ODE solver, you simply need to declare the colorvec: - -```julia -f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity, - colorvec=colorvec) -prob_ode_brusselator_2d_sparse = ODEProblem(f, - init_brusselator_2d(xyd_brusselator), - (0.,11.5),p) -@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) -``` - -Notice the massive speed enhancement! - -## Defining Linear Solver Routines and Jacobian-Free Newton-Krylov - -A completely different way to optimize the linear solvers for large sparse -matrices is to use a Krylov subspace method. This requires choosing a linear -solver for changing to a Krylov method. Optionally, one can use a Jacobian-free -operator to reduce the memory requirements. - -### Declaring a Jacobian-Free Newton-Krylov Implementation - -To swap the linear solver out, we use the `linsolve` command and choose the -GMRES linear solver. - -```julia -@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) -@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) -``` - -For more information on linear solver choices, see the -[linear solver documentation](http://docs.juliadiffeq.org/dev/features/linear_nonlinear.html). - -On this problem, handling the sparsity correctly seemed to give much more of a -speedup than going to a Krylov approach, but that can be dependent on the problem -(and whether a good preconditioner is found). - -We can also enhance this by using a Jacobian-Free implementation of `f'(x)*v`. -To define the Jacobian-Free operator, we can use -[DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to generate -an operator `JacVecOperator` such that `Jv*v` performs `f'(x)*v` without building -the Jacobian matrix. 
- -```julia -using DiffEqOperators -Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0) -``` - -and then we can use this by making it our `jac_prototype`: - -```julia -f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv) -prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p) -@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) -``` - -### Adding a Preconditioner - -The [linear solver documentation](http://docs.juliadiffeq.org/dev/features/linear_nonlinear.html#IterativeSolvers.jl-Based-Methods-1) -shows how you can add a preconditioner to the GMRES. For example, you can -use packages like [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) -to add an algebraic multigrid (AMG) or [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl) -for an incomplete LU-factorization (iLU). - -```julia -using AlgebraicMultigrid -pc = aspreconditioner(ruge_stuben(jac_sparsity)) -@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false) -``` - -## Using Structured Matrix Types - -If your sparsity pattern follows a specific structure, for example a banded -matrix, then you can declare `jac_prototype` to be of that structure and then -additional optimizations will come for free. Note that in this case, it is -not necessary to provide a `colorvec` since the color vector will be analytically -derived from the structure of the matrix. - -The matrices which are allowed are those which satisfy the -[ArrayInterface.jl](https://github.com/JuliaDiffEq/ArrayInterface.jl) interface -for automatically-colorable matrices. 
These include: - -- Bidiagonal -- Tridiagonal -- SymTridiagonal -- BandedMatrix ([BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)) -- BlockBandedMatrix ([BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)) - -Matrices which do not satisfy this interface can still be used, but the matrix -coloring will not be automatic, and an appropriate linear solver may need to -be given (otherwise it will default to attempting an LU-decomposition). - -## Sundials-Specific Handling - -While much of the setup makes the transition to using Sundials automatic, there -are some differences between the pure Julia implementations and the Sundials -implementations which must be taken note of. These are all detailed in the -[Sundials solver documentation](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html#Sundials.jl-1), -but here we will highlight the main details which one should make note of. - -Defining a sparse matrix and a Jacobian for Sundials works just like any other -package. The core difference is in the choice of the linear solver. With Sundials, -the linear solver choice is done with a Symbol in the `linear_solver` from a -preset list. Particular choices of note are `:Band` for a banded matrix and -`:GMRES` for using GMRES. If you are using Sundials, `:GMRES` will not require -defining the JacVecOperator, and instead will always make use of a Jacobian-Free -Newton Krylov (with numerical differentiation). Thus on this problem we could do: - -```julia -using Sundials -# Sparse Version -@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false) -# GMRES Version: Doesn't require any extra stuff! -@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) -``` - -Details for setting up a preconditioner with Sundials can be found at the -[Sundials solver page](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html#Sundials.jl-1). 
- -## Handling Mass Matrices - -Instead of just defining an ODE as $u' = f(u,p,t)$, it can be common to express -the differential equation in the form with a mass matrix: - -$$Mu' = f(u,p,t)$$ - -where $M$ is known as the mass matrix. Let's solve the Robertson equation. -At the top we wrote this equation as: - -$$\begin{align} -dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\ -dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ -dy_3 &= 3*10^7 y_{3}^2 \\ -\end{align}$$ - -But we can instead write this with a conservation relation: - -$$\begin{align} -dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\ -dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ -1 &= y_{1} + y_{2} + y_{3} \\ -\end{align}$$ - -In this form, we can write this as a mass matrix ODE where $M$ is singular -(this is another form of a differential-algebraic equation (DAE)). Here, the -last row of `M` is just zero. We can implement this form as: - -```julia -using DifferentialEquations -function rober(du,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - du[1] = -k₁*y₁+k₃*y₂*y₃ - du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ - du[3] = y₁ + y₂ + y₃ - 1 - nothing -end -M = [1. 0 0 - 0 1. 0 - 0 0 0] -f = ODEFunction(rober,mass_matrix=M) -prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -sol = solve(prob_mm,Rodas5()) - -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -Note that if your mass matrix is singular, i.e. your system is a DAE, then you -need to make sure you choose -[a solver that is compatible with DAEs](http://docs.juliadiffeq.org/dev/solvers/dae_solve.html#Full-List-of-Methods-1) diff --git a/tutorials/exercises/01-workshop_exercises.jmd b/tutorials/exercises/01-workshop_exercises.jmd deleted file mode 100644 index 5f4273cf..00000000 --- a/tutorials/exercises/01-workshop_exercises.jmd +++ /dev/null @@ -1,680 +0,0 @@ ---- -title: DifferentialEquations.jl Workshop Exercises -author: Chris Rackauckas ---- - -These exercises teach common workflows which involve DifferentialEquations.jl. 
The designation (B) is for "Beginner", meaning that a user new to the package -should feel comfortable trying this exercise. An exercise designated (I) is -for "Intermediate", meaning the user may want to have some previous background -in DifferentialEquations.jl or try some (B) exercises first. The additional -(E) designation is for "Experienced", which are portions of exercises which may -take some work. - -The exercises are described as follows: - -- Exercise 1 takes the user through solving a stiff ordinary differential equation - and using ModelingToolkit.jl to automatically convert the function to a - symbolic form to derive the analytical Jacobian to speed up the solver. The - same biological system is then solved with stochasticity, utilizing - EnsembleProblems to understand 95% bounds on the solution. Finally, - probabilistic programming is employed to perform Bayesian parameter estimation - of the parameters against data. -- Exercise 2 takes the user through defining a hybrid delay differential equation, - that is a differential equation with events, and using differentiable programming - techniques (automatic differentiation) to perform gradient-based parameter - estimation. -- Exercise 3 takes the user through differential-algebraic equation (DAE) - modeling, the concept of index, and using both mass-matrix and implicit - ODE representations. This will require doing a bit of math, but the student - will understand how to change their equations to make their DAE numerically - easier for the integrators. -- Exercise 4 takes the user through optimizing a PDE solver, utilizing - automatic sparsity pattern recognition, automatic conversion of numerical - codes to symbolic codes for analytical construction of the Jacobian, - preconditioned GMRES, and setting up a solver for IMEX and GPUs, and computing - adjoints of PDEs. 
- Exercise 5 focuses on a chaotic orbit, utilizing parallel ensembles across - supercomputers and GPUs to quickly describe phase space. -- Exercise 6 takes the user through training a neural stochastic differential - equation, using GPU-acceleration and adjoints through Flux.jl's neural - network framework to build efficient training codes. - -This exercise worksheet is meant to be a living document leading new users through -a deep dive of the DifferentialEquations.jl feature set. If you have further suggestions -or want to contribute new problems, please open an issue or PR at the -DiffEqTutorials.jl repository. - -# Problem 1: Investigating Sources of Randomness and Uncertainty in a Stiff Biological System (B) - -In this problem we will walk through the basics of simulating models with -DifferentialEquations.jl. Let's take the -[Oregonator model of the Belousov-Zhabotinskii chemical reaction system](https://www.radford.edu/~thompson/vodef90web/problems/demosnodislin/Demos_Pitagora/DemoOrego/demoorego.pdf). -This system describes a classical example in non-equilibrium thermodynamics -and is a well-known natural chemical oscillator. - -## Part 1: Simulating the Oregonator ODE model - -When modeling, usually one starts off by investigating the deterministic model. -The deterministic ODE formulation of the Oregonator is -given by the equations - -$$\begin{align} -\frac{dx}{dt} &= s(y-xy + x - qx^2)\\ -\frac{dy}{dt} &= (-y - xy + z)/s\\ -\frac{dz}{dt} &= w(x - z)\end{align}$$ - -with parameter values $s=77.27$, $w=0.161$, and $q=8.375 \times 10^{-6}$, and -initial conditions $x(0)=1$, $y(0)=2$, and $z(0)=3$. Use -[the tutorial on solving ODEs](http://docs.juliadiffeq.org/dev/tutorials/ode_example.html) -to solve this differential equation on the -timespan of $t\in[0,360]$ with the default ODE solver. To investigate the result, -plot the solution of all components over time, and plot the phase space plot of -the solution (hint: use `vars=(1,2,3)`). 
What shape is being drawn in phase space? - -## Part 2: Investigating Stiffness - -Because the reaction rates of `q` vs `s` is very large, this model has a "fast" -system and a "slow" system. This is typical of ODEs which exhibit a property -known as stiffness. Stiffness changes the ODE solvers which can handle the -equation well. [Take a look at the ODE solver page](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html) -and investigate solving the equation using methods for non-stiff equations -(ex: `Tsit5`) and stiff equations (ex: `Rodas5`). - -Benchmark using $t\in[0,50]$ using `@btime` from BenchmarkTools.jl. What -happens when you increase the timespan? - -## (Optional) Part 3: Specifying Analytical Jacobians (I) - -Stiff ODE solvers internally utilize the Jacobian of the ODE system in order -to improve the stepsizes in the solution. However, computing and factorizing -the Jacobian is costly, and thus it can be beneficial to provide the analytical -solution. - -Use the -[ODEFunction definition page](http://docs.juliadiffeq.org/dev/features/performance_overloads.html) -to define an `ODEFunction` which holds both the OREGO ODE and its Jacobian, and solve using `Rodas5`. - -## (Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations - -Deriving Jacobians by hand is tedious. Thankfully symbolic mathematical systems -can do the work for you. And thankfully, DifferentialEquations.jl has tools -to automatically convert numerical problems into symbolic problems to perform -the analysis on! - -follow the [ModelingToolkit.jl README](https://github.com/JuliaDiffEq/ModelingToolkit.jl) -to automatically convert your ODE definition -to its symbolic form using `modelingtoolkitize` and calculate the analytical -Jacobian. Use the compilation functions to build the `ODEFunction` with the -embedded analytical solution. 
- -## Part 5: Adding stochasticity with stochastic differential equations - -How does this system react in the presence of stochasticity? We can investigate -this question by using stochastic differential equations. A stochastic -differential equation formulation of this model is known as the multiplicative -noise model, and is created with: - -$$\begin{align} -dx &= s(y-xy + x - qx^2)dt + \sigma_1 x dW_1\\ -dy &= \frac{-y - xy + z}{s}dt + \sigma_2 y dW_2\\ -dz &= w(x - z)dt + \sigma_3 z dW_3\end{align}$$ - -with $\sigma_i = 0.1$ where the `dW` terms describe a Brownian motion, a -continuous random process with normally distributed increments. Use the -[tutorial on solving SDEs](http://docs.juliadiffeq.org/dev/tutorials/sde_example.html) -to simulate this model. Then, -[use the `EnsembleProblem`](http://docs.juliadiffeq.org/dev/features/ensemble.html) -to generate and plot 100 trajectories of the stochastic model, and use -`EnsembleSummary` to plot the mean and 5%-95% region over time. - -Try solving with the `ImplicitRKMil` and `SOSRI` methods. Notice that it isn't -stiff every single time! - -(For fun, see if you can make the Euler-Maruyama `EM()` method solve this equation. -This requires a choice of `dt` small enough to be stable. This is the "standard" -method!) - -## Part 6: Gillespie jump models of discrete stochasticity - -When biological models have very few particles, continuous models no longer -make sense, and instead using the full discrete formulation can be required -to accurately describe the dynamics. A discrete differential equation, or -Gillespie model, is a continuous-time Markov chain with Poisson-distributed -jumps. A discrete description of the Oregonator model is given by a chemical -reaction system: - -```{julia;eval=false} -A+Y -> X+P -X+Y -> 2P -A+X -> 2X + 2Z -2X -> A + P (note: this has rate kX^2!) -B + Z -> Y -``` - -where reactions take place at a rate which is proportional to its components, -i.e.
the first reaction has a rate `k*A*Y` for some `k`. -Use the [tutorial on Gillespie SSA models](http://docs.juliadiffeq.org/dev/tutorials/discrete_stochastic_example.html) -to implement the `JumpProblem` for this model, and use the `EnsembleProblem` -and `EnsembleSummary` to characterize the stochastic trajectories. - -For what rate constants does the model give the oscillatory dynamics for the -ODE approximation? For information on the true reaction rates, consult -[the original paper](https://pubs.acs.org/doi/abs/10.1021/ja00780a001). - -## Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I) - -In many casees, one comes to understand the proper values for their model's -parameters by utilizing data fitting techniques. In this case, we will use -the DiffEqBayes.jl library to perform a Bayesian estimation of the parameters. -For our data we will the following potential output: - -```{julia;eval=false} -t = 0.0:1.0:30.0 -data = [1.0 2.05224 2.11422 2.1857 2.26827 2.3641 2.47618 2.60869 2.7677 2.96232 3.20711 3.52709 3.97005 4.64319 5.86202 9.29322 536.068 82388.9 57868.4 1.00399 1.00169 1.00117 1.00094 1.00082 1.00075 1.0007 1.00068 1.00066 1.00065 1.00065 1.00065 - 2.0 1.9494 1.89645 1.84227 1.78727 1.73178 1.67601 1.62008 1.56402 1.50772 1.45094 1.39322 1.33366 1.2705 1.19958 1.10651 0.57194 0.180316 0.431409 251.774 591.754 857.464 1062.78 1219.05 1335.56 1419.88 1478.22 1515.63 1536.25 1543.45 1539.98 - 3.0 2.82065 2.68703 2.58974 2.52405 2.48644 2.47449 2.48686 2.52337 2.58526 2.67563 2.80053 2.9713 3.21051 3.5712 4.23706 12.0266 14868.8 24987.8 23453.4 19202.2 15721.6 12872.0 10538.8 8628.66 7064.73 5784.29 4735.96 3877.66 3174.94 2599.6] -``` - -[Follow the exmaples on the parameter estimation page](http://docs.juliadiffeq.org/dev/analysis/parameter_estimation.html#Bayesian-Methods-1) -to perform a Bayesian parameter estimation. 
What are the most likely parameters -for the model given the posterior parameter distributions? - -Use the `ODEProblem` to perform the fit. If you have time, use the `EnsembleProblem` -of `SDEProblem`s to perform a fit over averages of the SDE solutions. Note that -the SDE fit will take significantly more computational resources! See the GPU -parallelism section for details on how to accelerate this. - -## (Optional) Part 8: Using DiffEqBiological's Reaction Network DSL - -DiffEqBiological.jl is a helper library for the DifferentialEquations.jl -ecosystem for defining chemical reaction systems at a high leevel for easy -simulation in these various forms. Use the descrption -[from the Chemical Reaction Networks documentation page](http://docs.juliadiffeq.org/dev/models/biological.html) -to build a reaction network and generate the ODE/SDE/jump equations, and -compare the result to your handcoded versions. - -# Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B) - -Hybrid differential equations are differential equations with events, where -events are some interaction that occurs according to a prespecified condition. -For example, the bouncing ball is a classic hybrid differential equation given -by an ODE (Newton's Law of Gravity) mixed with the fact that, whenever the -ball hits the floor (`x=0`), then the velocity of the ball flips (`v=-v`). - -In addition, many models incorporate delays, that is the driving force of the -equation is dependent not on the current values, but values from the past. -These delay differential equations model how individuals in the economy act -on old information, or that biological processes take time to adapt to a new -environment. - -In this equation we will build a hybrid delayed pharmacokinetic model and -use the parameter estimation techniques to fit this it to a data. 
- -## Part 1: Defining an ODE with Predetermined Doses - -First, let's define the simplest hybrid ordinary differential equation: an ODE -where the events take place at fixed times. The ODE we will use is known as -the one-compartment model: - -$$\begin{align} -\frac{d[Depot]}{dt} &= -K_a [Depot] + R\\ -\frac{d[Central]}{dt} &= K_a [Depot] - K_e [Central]\end{align}$$ - -with $t \in [0,90]$, $u_0 = [100.0,0]$, and $p=[K_a,K_e]=[2.268,0.07398]$. - -With this model, use [the event handling documentation page](http://docs.juliadiffeq.org/dev/features/callback_functions.html) -to define a `DiscreteCallback` which fires at `t ∈ [24,48,72]` and adds a -dose of 100 into `[Depot]`. (Hint: you'll want to set `tstops=[24,48,72]` to -force the ODE solver to step at these times). - -## Part 2: Adding Delays - -Now let's assume that instead of there being one compartment, there are many -transit compartment that the drug must move through in order to reach the -central compartment. This effectively delays the effect of the transition from -`[Depot]` to `[Central]`. To model this effect, we will use the delay -differential equation which utilizes a fixed time delay $\tau$: - -$$\begin{align} -\frac{d[Depot]}{dt} &= -K_a [Depot](t)\\ -\frac{d[Central]}{dt} &= K_a [Depot](t-\tau) - K_e [Central]\end{align}$$ - -where the parameter $τ = 6.0$. -[Use the DDE tutorial](http://docs.juliadiffeq.org/dev/tutorials/dde_example.html) -to define and solve this delayed version of the hybrid model. - -## Part 3: Automatic Differentiation (AD) for Optimization (I) - -In order to fit parameters $(K_a,K_e,\tau)$ we will want to be able to calculate -the gradient of the solution with respect to the initial conditions. One way to -do this is via Automatic Differentition (AD). For small numbers of parameters -(<100), it is fastest to use Forward-Mode Automatic Differentition -(even faster than using adjoint sensitivity analysis!). 
Thus for this problem -we will make use of ForwardDiff.jl to use Dual number arithmetic to retrive -both the solution and its derivative w.r.t. parameters in a single solve. - -[Use the information from the page on local sensitvity analysis](http://docs.juliadiffeq.org/dev/analysis/sensitivity.html) -to define the input dual numbers, solve the equation, and plot both the solution -over time and the derivative of the solution w.r.t. the parameters. - -## Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl - -Now let's fit the delayed model to a dataset. For the data, use the array - -```{julia;eval=false} -t = 0.0:12.0:90.0 -data = [100.0 0.246196 0.000597933 0.24547 0.000596251 0.245275 0.000595453 0.245511 - 0.0 53.7939 16.8784 58.7789 18.3777 59.1879 18.5003 59.2611] -``` - -Use [the parameter estimation page](http://docs.juliadiffeq.org/dev/analysis/parameter_estimation.html) -to define a loss function with `build_loss_objective` and optimize the parameters -against the data. What parameters were used to generate the data? - -## Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I) - -Now that we have fit our delay differential equation model to the dataset, we -want to start testing out automated treatment strategies. Let's assume that -instead of giving doses at fixed time points, we invent a wearable which -monitors the patient and administers a dose whenever the internal drug -concentration falls below 25. To model this effect, we will need to use -`ContinuousCallbacks` to define a callback that triggers when `[Central]` falls -below the threshold value. - -[Use the documentation on the event handling page](http://docs.juliadiffeq.org/dev/features/callback_functions.html) to define such a callback, -and plot the solution over time. How many times does the auto-doser administer -a dose? How much does this change as you change the delay time $\tau$? 
- -## Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods - -To understand how the parameters affect the solution in a global sense, one -wants to use Global Sensitivity Analysis. Use the -[GSA documentation page](http://docs.juliadiffeq.org/dev/analysis/global_sensitivity.html) -to perform global sensitivity analysis and quantify the effect of the various -parameters on the solution. - -# Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B) - -Differential-Algebraic Equation (DAE) systems are like ODEs but allow for adding -constraints into the models. This problem will look at solving the double -pendulum problem with enforcement of the rigid body constraints, requiring that -the total distance `L` is constant throughout the simulation. While these -equations can be rewritten in an ODE form, in many cases it can be simpler -to solve the equation directly with the constraints. This tutorial will -cover both the idea of index, how to manually perform index reduction, -and how to make use of mass matrix and implicit ODE solvers to handle these -problems. - -## Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations - -A mass-matrix ordinary differential equation (ODE) is an ODE where the -left-hand side, the derivative side, is multiplied by a matrix known as the -mass matrix. This is described as: - -$$Mu' = f(u,p,t)$$ - -where $M$ is the mass matrix. When $M$ is invertible, there is an ODE which is -equivalent to this formulation. When $M$ is not invertible, this can have a -distinctly different behavior and is known as a Differential-Algebraic Equation (DAE). - -Solve the Robertson DAE: - -$$\begin{align} -\frac{dy_1}{dt} &= -0.04y_1 + 10^4 y_2y_3\\ -\frac{dy_2}{dt} &= 0.04y_1 - 10^4 y_2y_3 - 3\times 10^7 y_2^2\\ -1 &= y_1 + y_2 + y_3\end{align}$$ - -with $y(0) = [1,0,0]$ and $dy(0) = [-0.04,0.04,0.0]$ using the mass-matrix -formulation and `Rodas5()`.
Use the -[ODEProblem page](http://docs.juliadiffeq.org/dev/types/ode_types.html) -to find out how to declare a mass matrix. - -(Hint: what if the last row has all zeros?) - -## Part 2: Solving the Implicit Robertson Equations with IDA - -Use the [DAE Tutorial](http://docs.juliadiffeq.org/dev/tutorials/dae_example.html) -to define a DAE in its implicit form and solve the Robertson equation with IDA. -Why is `differential_vars = [true,true,false]`? - -## Part 3: Manual Index Reduction of the Single Pendulum - -The index of a DAE is a notion used to measure distance from -its related ODE. There are many different definitions of index, -but we're going to stick to the idea of differential index: -the number of differentiations required to convert a system -of DAEs into explicit ODE form. DAEs of high index are -usually transformed via a procedure called index reduction. -The following example will demonstrate this. - -Consider the index 3 DAE system of the cartesian pendulum. -After writing down the force equations in both directions, -we arrive at the following DAE: - -$$ -\begin{align} -m\ddot{x} &= \frac{x}{L}T \\ -m\ddot{y} &= \frac{y}{L}T - mg \\ -x^2 + y^2 &= L -\end{align} -$$ - -Notice that we don't have an equation describing the -behaviour of `T`. Let us now perform index reduction to -extract an equation for `T` - -Differentiate this third equation twice with respect to time -to reduce it from index 3 to index 1. - -## Part 4: Single Pendulum Solution with IDA -Write these equations in implicit form and solve the system using -IDA. 
- -## Part 5: Solving the Double Penulum DAE System - -The following equations describe a double -pendulum system: -$$ -\begin{align} -m_2\ddot{x_2} &= \frac{x_2}{L_2}T_2 \\ -m_2\ddot{y_2} &= \frac{y_2}{L_2}T_2 - m_2g \\ -{x_2}^2 + {y_2}^2 &= L_2 \\ -m_1\ddot{x_1} &= \frac{x_1}{L_1}T_1 - \frac{x_2}{L_2}T_2 \\ -m_2\ddot{y_1} &= \frac{y_1}{L_1}T_2 - m_1g - \frac{y_2}{L_2}T_2 \\ -{x_1}^2 + {y_1}^2 &= L_1 \\ -\end{align} -$$ - -Perform index reduction and solve it like in the previous example. - -# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I) - -This problem will focus on implementing and optimizing the solution of the -2-dimensional Brusselator equations. The BRUSS equations are a well-known -highly stiff oscillatory system of partial differential equations which are -used in stiff ODE solver benchmarks. In this tutorial we will walk first -through a simple implementation, then do allocation-free implementations and -looking deep into solver options and benchmarking. - -## Part 1: Implementing the BRUSS PDE System as ODEs - -The Brusselator PDE is defined as follows: - -$$\begin{align} -\frac{\partial u}{\partial t} &= 1 + u^2v - 4.4u + \alpha(\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2}) + f(x, y, t)\\ -\frac{\partial v}{\partial t} &= 3.4u - u^2v + \alpha(\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2})\end{align}$$ - -where - -$$f(x, y, t) = \begin{cases} -5 & \quad \text{if } (x-0.3)^2+(y-0.6)^2 ≤ 0.1^2 \text{ and } t ≥ 1.1 \\ -0 & \quad \text{else}\end{cases}$$ - -and the initial conditions are - -$$\begin{align} -u(x, y, 0) &= 22\cdot y(1-y)^{3/2} \\ -v(x, y, 0) &= 27\cdot x(1-x)^{3/2}\end{align}$$ - -with the periodic boundary condition - -$$\begin{align} -u(x+1,y,t) &= u(x,y,t) \\ -u(x,y+1,t) &= u(x,y,t)\end{align}$$ - -on a timespan of $t \in [0,22]$. - -To solve this PDE, we will discretize it into a system of ODEs with the finite -difference method. 
We discretize `u` and `v` into arrays of the values at each -time point: `u[i,j] = u(i*dx,j*dy)` for some choice of `dx`/`dy`, and same for -`v`. Then our ODE is defined with `U[i,j,k] = [u v]`. The second derivative -operator, the Laplacian, discretizes to become a tridiagonal matrix with -`[1 -2 1]` and a `1` in the top right and bottom left corners. The nonlinear functions -are then applied at each point in space (they are broadcast). Use `dx=dy=1/32`. - -You will know when you have the correct solution when you plot the solution -at `x=y=0` and see a periodic orbit, e.g., `ts=0:0.05:22; plot(ts, sol1.(ts, -idxs=1))`. - -If you are not familiar with this process, see -[the Gierer-Meinhardt example from the DiffEqTutorials.](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/03-optimizing_diffeq_code.html) - -Note: Start by doing the simplest implementation! - -## Part 2: Optimizing the BRUSS Code - -PDEs are expensive to solve, and so we will go nowhere without some code -optimizing! Follow the steps described in -[the Gierer-Meinhardt example from the DiffEqTutorials](http://juliadiffeq.org/DiffEqTutorials.jl/html/introduction/03-optimizing_diffeq_code.html) -to optimize your Brusselator code. Try other formulations and see what ends -up the fastest! Find a trade-off between performance and simplicity that suits -your needs. - -## Part 3: Exploiting Jacobian Sparsity with Color Differentiation - -Use the `sparsity!` function from [SparseDiffTools](https://github.com/JuliaDiffEq/SparseDiffTools.jl) -to generate the sparsity pattern for the Jacobian of this problem. Follow -the documentation [on the DiffEqFunction page](http://docs.juliadiffeq.org/dev/features/performance_overloads.html) -to specify the sparsity pattern of the Jacobian. Generate and add the color -vector to speed up the computation of the Jacobian.
- -## (Optional) Part 4: Structured Jacobians - -Specify the sparsity pattern using a BlockBandedMatrix from -[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl) -to accelerate the previous sparsity handling tricks. - -## (Optional) Part 5: Automatic Symbolicification and Analytical Jacobian - -Use the `modelingtoolkitize` function from ModelingToolkit.jl to convert your -numerical ODE function into a symbolic ODE function and use that to compute and -solve with an analytical sparse Jacobian. - -## Part 6: Utilizing Preconditioned-GMRES Linear Solvers - -Use the [linear solver specification page](http://docs.juliadiffeq.org/dev/features/linear_nonlinear.html) -to solve the equation with `TRBDF2` with GMRES. Use the Sundials documentation -to solve the equation with `CVODE_BDF` with Sundials' special internal GMRES. -To both of these, use the [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) -to add a preconditioner to the GMRES solver. - -## Part 7: Exploring IMEX and Exponential Integrator Techniques (E) - -Instead of using the standard `ODEProblem`, define a [`SplitODEProblem`](http://docs.juliadiffeq.org/dev/types/split_ode_types.html) -to move some of the equation to the "non-stiff part". Try different splits -and solve with `KenCarp4` to see if the solution can be accelerated. - -Next, use `MatrixFreeOperator` and `DiffEqArrayOperator` to define part of the equation as linear, and -use the `ETDRK4` exponential integrator to solve the equation. Note that this -technique is not appropriate for this equation since it relies on the -nonlinear term being non-stiff for best results. - -## Part 8: Work-Precision Diagrams for Benchmarking Solver Choices - -Use the `WorkPrecisionSet` method from -[DiffEqDevTools.jl](https://github.com/JuliaDiffEq/DiffEqDevTools.jl) to -benchmark multiple different solver methods and find out what combination is -most efficient. 
-[Take a look at DiffEqBenchmarks.jl](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) -for usage examples. - -## Part 9: GPU-Parallelism for PDEs (E) - -Fully vectorize your implementation of the ODE and use a `CuArray` from -[CuArrays.jl](https://github.com/JuliaGPU/CuArrays.jl) as the initial condition -to cause the whole solution to be GPU accelerated. - -## Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs - -In order to optimize the parameters of a PDE, you need to be able to compute -the gradient of the solution with respect to the parameters. This is done -through sensitivity analysis. For PDEs, generally the system is at a scale -where forward sensitivity analysis (forward-mode automatic differentiation) -is no longer suitable, and for these cases one uses adjoint sensitivity analysis. - -Rewrite the PDE so the constant terms are parameters, and use the -[adjoint sensitivity analysis](http://docs.juliadiffeq.org/dev/analysis/sensitivity.html#Adjoint-Sensitivity-Analysis-1) -documentation to solve for the solution gradient with a cost function being the -L2 distance of the solution from the value 1. Solve with interpolated and -checkpointed adjoints. Play with using reverse-mode automatic differentiation -vs direct computation of vector-Jacobian products using the `autojacvec` option -of the `SensitivityAlg`. Find the set of options most suitable for this PDE. - -If you have compute time, use this adjoint to optimize the parameters of the -PDE with respect to this cost function. - -# Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B) - -In this example we will investigate how the parameters "generally" effect the -solution in the chaotic Henon-Heiles system. By "generally" we will use global -sensitivity analysis methods to get an average global characterization of the -parameters on the solution. 
In addition to a global sensitivity approach, we -will generate large ensembles of solutions with different parameters using -a GPU-based parallelism approach. - -## Part 1: Implementing the Henon-Heiles System (B) - -The Henon-Heiles Hamiltonian system is described by the ODEs: - -$$\begin{align} -\frac{dp_1}{dt} &= -q_1 (1 + 2q_2)\\ -\frac{dp_2}{dt} &= -q_2 - (q_1^2 - q_2^2)\\ -\frac{dq_1}{dt} &= p_1\\ -\frac{dq_2}{dt} &= p_2\end{align}$$ - -with initial conditions $u_0 = [0.1,0.0,0.0,0.5]$. -Solve this system over the timespan $t\in[0,1000]$ - -## (Optional) Part 2: Alternative Dynamical Implmentations of Henon-Heiles (B) - -The Henon-Heiles defines a Hamiltonian system with certain structures which -can be utilized for a more efficient solution. Use [the Dynamical problems page](http://docs.juliadiffeq.org/dev/types/dynamical_types.html) -to define a `SecondOrderODEProblem` corresponding to the acceleration terms: - -$$\begin{align} -\frac{d^2q_1}{dt^2} &= -q_1 (1 + 2q_2)\\ -\frac{d^2q_2}{dt^2} &= -q_2 - (q_1^2 - q_2^2)\end{align}$$ - -Solve this with a method that is specific to dynamical problems, like `DPRKN6`. - -The Hamiltonian can also be directly described: - -$$H(p,q) = \frac{1}{2}(p_1^2 + p_2^2) + \frac{1}{2}(q_1^2+q_2^2+2q_1^2 q_2 - \frac{2}{3}q_2^3)$$ - -Solve this problem using the `HamiltonianProblem` constructor from DiffEqPhysics.jl. - -## Part 3: Parallelized Ensemble Solving - -To understand the orbits of the Henon-Heiles system, it can be useful to solve -the system with many different initial conditions. Use the -[ensemble interface](http://docs.juliadiffeq.org/dev/features/ensemble.html) -to solve with randomized initial conditions in parallel using threads with -`EnsembleThreads()`. Then, use `addprocs()` to add more cores and solve using -`EnsembleDistributed()`. 
The former will solve using all of the cores on a -single computer, while the latter will use all of the cores on which there -are processors, which can include thousands across a supercomputer! See -[Julia's parallel computing setup page](https://docs.julialang.org/en/v1/manual/parallel-computing/index.html) -for more details on the setup. - -## Part 4: Parallelized GPU Ensemble Solving - -Set up the CUDAnative.jl library and use the `EnsembleGPUArray()` method to -parallelize the solution across the thousands of cores of a GPU. Note that -this will efficiently solve for hundreds of thousands of trajectories. - -# Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I) - -In the previous models we had to define a model. Now let's shift the burden of -model-proofing onto data by utilizing neural differential equations. A neural -differential equation is a differential equation where the model equations -are replaced, either in full or in part, by a neural network. For example, a -neural ordinary differential equation is an equation $u^\prime = f(u,p,t)$ -where $f$ is a neural network. We can learn this neural network from data using -various methods, the easiest of which is known as the single shooting method, -where one chooses neural network parameters, solves the equation, and checks -the ODE's solution against data as a loss. - -In this example we will define and train various forms of neural differential -equations. Note that all of the differential equation types are compatible with -neural differential equations, so this is only going to scratch the surface of -the possibilities! - -## Part 1: Constructing and Training a Basic Neural ODE - -Use the [DiffEqFlux.jl README](https://github.com/JuliaDiffEq/DiffEqFlux.jl) to -construct a neural ODE to train against the training data: - -```{julia;eval=false} -u0 = Float32[2.; 0.]
-datasize = 30 -tspan = (0.0f0,1.5f0) - -function trueODEfunc(du,u,p,t) - true_A = [-0.1 2.0; -2.0 -0.1] - du .= ((u.^3)'true_A)' -end -t = range(tspan[1],tspan[2],length=datasize) -prob = ODEProblem(trueODEfunc,u0,tspan) -ode_data = Array(solve(prob,Tsit5(),saveat=t)) -``` - -## Part 2: GPU-accelerating the Neural ODE Process - -Use the `gpu` function from Flux.jl to transform all of the calculations onto -the GPU and train the neural ODE using GPU-accelerated `Tsit5` with adjoints. - -## Part 3: Defining and Training a Mixed Neural ODE - -Gather data from the Lotka-Volterra equation: - -```{julia;eval=false} -function lotka_volterra(du,u,p,t) - x, y = u - α, β, δ, γ = p - du[1] = dx = α*x - β*x*y - du[2] = dy = -δ*y + γ*x*y -end -u0 = [1.0,1.0] -tspan = (0.0,10.0) -p = [1.5,1.0,3.0,1.0] -prob = ODEProblem(lotka_volterra,u0,tspan,p) -sol = Array(solve(prob,Tsit5())(0.0:1.0:10.0)) -``` - -Now use the -[mixed neural section of the documentation](https://github.com/JuliaDiffEq/DiffEqFlux.jl#mixed-neural-des) -to define the mixed neural ODE where the functional form of $\frac{dx}{dt}$ is -known, and try to derive a neural formulation for $\frac{dy}{dt}$ directly from -the data. - -## Part 4: Constructing a Basic Neural SDE - -Generate data from the Lotka-Volterra equation with multiplicative noise - -```{julia;eval=false} -function lotka_volterra(du,u,p,t) - x, y = u - α, β, δ, γ = p - du[1] = dx = α*x - β*x*y - du[2] = dy = -δ*y + γ*x*y -end -function lv_noise(du,u,p,t) - du[1] = p[5]*u[1] - du[2] = p[6]*u[2] -end -u0 = [1.0,1.0] -tspan = (0.0,10.0) -p = [1.5,1.0,3.0,1.0,0.1,0.1] -prob = SDEProblem(lotka_volterra,lv_noise,u0,tspan,p) -sol = [Array(solve(prob,SOSRI())(0.0:1.0:10.0)) for i in 1:20] # 20 solution samples -``` - -Train a neural stochastic differential equation $dX = f(X)dt + g(X)dW_t$ where -both the drift ($f$) and the diffusion ($g$) functions are neural networks. -See if constraining $g$ can make the problem easier to fit. 
- -## Part 5: Optimizing the training behavior with minibatching (E) - -Use minibatching on the data to improve the training procedure. An example -[can be found at this PR](https://github.com/FluxML/model-zoo/pull/88). diff --git a/tutorials/exercises/02-workshop_solutions.jmd b/tutorials/exercises/02-workshop_solutions.jmd deleted file mode 100644 index c1128f19..00000000 --- a/tutorials/exercises/02-workshop_solutions.jmd +++ /dev/null @@ -1,722 +0,0 @@ ---- -title: DifferentialEquations.jl Workshop Exercise Solutions -author: Chris Rackauckas ---- - -```julia -using DifferentialEquations -using Sundials -using BenchmarkTools -using Plots -``` - -# Problem 1: Investigating Sources of Randomness and Uncertainty in a Biological System - -## Part 1: Simulating the Oregonator ODE model - -```julia -using DifferentialEquations, Plots -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -p = [77.27,8.375e-6,0.161] -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,360.0),p) -sol = solve(prob) -plot(sol) -``` - -```julia -plot(sol,vars=(1,2,3)) -``` - -## Part 2: Investigating Stiffness - -```julia -using BenchmarkTools -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,50.0),p) -@btime sol = solve(prob,Tsit5()) -``` - -```julia -@btime sol = solve(prob,Rodas5()) -``` - -## (Optional) Part 3: Specifying Analytical Jacobians (I) - -## (Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations - -## Part 5: Adding stochasticity with stochastic differential equations - -```julia -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -function g(du,u,p,t) - du[1] = 0.1u[1] - du[2] = 0.1u[2] - du[3] = 0.1u[3] -end -p = [77.27,8.375e-6,0.161] -prob = SDEProblem(orego,g,[1.0,2.0,3.0],(0.0,30.0),p) -sol = solve(prob,SOSRI()) -plot(sol) -``` - -```julia -sol = solve(prob,ImplicitRKMil()); plot(sol) -``` 
- -```julia -sol = solve(prob,ImplicitRKMil()); plot(sol) -``` - -## Part 6: Gillespie jump models of discrete stochasticity - -## Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I) - -The data was generated with: - -```julia -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -p = [60.0,1e-5,0.2] -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,30.0),p) -sol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14) -``` - -## (Optional) Part 8: Using DiffEqBiological's Reaction Network DSL - -# Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B) - -## Part 1: Defining an ODE with Predetermined Doses - -```julia -function onecompartment(du,u,p,t) - Ka,Ke = p - du[1] = -Ka*u[1] - du[2] = Ka*u[1] - Ke*u[2] -end -p = (Ka=2.268,Ke=0.07398) -prob = ODEProblem(onecompartment,[100.0,0.0],(0.0,90.0),p) - -tstops = [24,48,72] -condition(u,t,integrator) = t ∈ tstops -affect!(integrator) = (integrator.u[1] += 100) -cb = DiscreteCallback(condition,affect!) -sol = solve(prob,Tsit5(),callback=cb,tstops=tstops) -plot(sol) -``` - -## Part 2: Adding Delays - -```julia -function onecompartment_delay(du,u,h,p,t) - Ka,Ke,τ = p - delayed_depot = h(p,t-τ)[1] - du[1] = -Ka*u[1] - du[2] = Ka*delayed_depot - Ke*u[2] -end -p = (Ka=2.268,Ke=0.07398,τ=6.0) -h(p,t) = [0.0,0.0] -prob = DDEProblem(onecompartment_delay,[100.0,0.0],h,(0.0,90.0),p) - -tstops = [24,48,72] -condition(u,t,integrator) = t ∈ tstops -affect!(integrator) = (integrator.u[1] += 100) -cb = DiscreteCallback(condition,affect!) 
-sol = solve(prob,MethodOfSteps(Rosenbrock23()),callback=cb,tstops=tstops) -plot(sol) -``` - -## Part 3: Automatic Differentiation (AD) for Optimization (I) - -## Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl - -The data was generated with - -```julia -p = (Ka = 0.5, Ke = 0.1, τ = 4.0) -``` - -## Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I) - -## Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods - -# Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B) - -## Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations -```julia -function f(du, u, p, t) - du[1] = -p[1]*u[1] + p[2]*u[2]*u[3] - du[2] = p[1]*u[1] - p[2]*u[2]*u[3] - p[3]*u[2]*u[2] - du[3] = u[1] + u[2] + u[3] - 1. -end -M = [1 0 0; 0 1 0; 0 0 0.] -p = [0.04, 10^4, 3e7] -u0 = [1.,0.,0.] -tspan = (0., 1e6) -prob = ODEProblem(ODEFunction(f, mass_matrix = M), u0, tspan, p) -sol = solve(prob, Rodas5()) -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -## Part 2: Solving the Implicit Robertson Equations with IDA -```julia -# Robertson Equation DAE Implicit form -function h(out, du, u, p, t) - out[1] = -p[1]*u[1] + p[2]*u[2]*u[3] - du[1] - out[2] = p[1]*u[1] - p[2]*u[2]*u[3] - p[3]*u[2]*u[2] - du[2] - out[3] = u[1] + u[2] + u[3] - 1. -end -p = [0.04, 10^4, 3e7] -du0 = [-0.04, 0.04, 0.0] -u0 = [1.,0.,0.] 
-tspan = (0., 1e6) -differential_vars = [true, true, false] -prob = DAEProblem(h, du0, u0, tspan, p, differential_vars = differential_vars) -sol = solve(prob, IDA()) -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -## Part 3: Manual Index Reduction of the Single Pendulum -Consider the equation: -$$ -x^2 + y^2 = L -$$ -Differentiating once with respect to time: -$$ -2x\dot{x} + 2y\dot{y} = 0 -$$ -A second time: -$$ -\begin{align} -{\dot{x}}^2 + x\ddot{x} + {\dot{y}}^2 + y\ddot{y} &= 0 \\ -u^2 + v^2 + x(\frac{x}{mL}T) + y(\frac{y}{mL}T - g) &= 0 \\ -u^2 + v^2 + \frac{x^2 + y^2}{mL}T - yg &= 0 \\ -u^2 + v^2 + \frac{T}{m} - yg &= 0 -\end{align} -$$ - -Our final set of equations is hence -$$ -\begin{align} - \ddot{x} &= \frac{x}{mL}T \\ - \ddot{y} &= \frac{y}{mL}T - g \\ - \dot{x} &= u \\ - \dot{y} &= v \\ - u^2 + v^2 -yg + \frac{T}{m} &= 0 -\end{align} -$$ - -We finally obtain $T$ into the third equation. -This required two differentiations with respect -to time, and so our system of equations went from -index 3 to index 1. Now our solver can handle the -index 1 system. - -## Part 4: Single Pendulum Solution with IDA -```julia -function f(out, da, a, p, t) - (L, m, g) = p - u, v, x, y, T = a - du, dv, dx, dy, dT = da - out[1] = x*T/(m*L) - du - out[2] = y*T/(m*L) - g - dv - out[3] = u - dx - out[4] = v - dy - out[5] = u^2 + v^2 - y*g + T/m - nothing -end - -# Release pendulum from top right -u0 = zeros(5) -u0[3] = 1.0 -du0 = zeros(5) -du0[2] = 9.81 - -p = [1,1,9.8] -tspan = (0.,100.) - -differential_vars = [true, true, true, true, false] -prob = DAEProblem(f, du0, u0, tspan, p, differential_vars = differential_vars) -sol = solve(prob, IDA()) -plot(sol, vars=(3,4)) -``` - -## Part 5: Solving the Double Penulum DAE System -For the double pendulum: -The equations for the second ball are the same -as the single pendulum case. 
That is, the equations -for the second ball are: -$$ -\begin{align} - \ddot{x_2} &= \frac{x_2}{m_2L_2}T_2 \\ - \ddot{y_2} &= \frac{y_2}{m_2L_2}T_2 - g \\ - \dot{x_2} &= u \\ - \dot{y_2} &= v \\ - u_2^2 + v_2^2 -y_2g + \frac{T_2}{m_2} &= 0 -\end{align} -$$ -For the first ball, consider $x_1^2 + y_1^2 = L $ -$$ -\begin{align} -x_1^2 + x_2^2 &= L \\ -2x_1\dot{x_1} + 2y_1\dot{y_1} &= 0 \\ -\dot{x_1}^2 + \dot{y_1}^2 + x_1(\frac{x_1}{m_1L_1}T_1 - \frac{x_2}{m_1L_2}T_2) + y_1(\frac{y_1}{m_1L_1}T_1 - g - \frac{y_2}{m_1L_2}T_2) &= 0 \\ -u_1^2 + v_1^2 + \frac{T_1}{m_1} - \frac{x_1x_2 + y_1y_2}{m_1L_2}T_2 &= 0 -\end{align} -$$ - -So the final equations are: -$$ -\begin{align} - \dot{u_2} &= x_2*T_2/(m_2*L_2) - \dot{v_2} &= y_2*T_2/(m_2*L_2) - g - \dot{x_2} &= u_2 - \dot{y_2} &= v_2 - u_2^2 + v_2^2 -y_2*g + \frac{T_2}{m_2} &= 0 - - \dot{u_1} &= x_1*T_1/(m_1*L_1) - x_2*T_2/(m_2*L_2) - \dot{v_1} &= y_1*T_1/(m_1*L_1) - g - y_2*T_2/(m_2*L_2) - \dot{x_1} &= u_1 - \dot{y_1} &= v_1 - u_1^2 + v_1^2 + \frac{T_1}{m_1} + - \frac{-x_1*x_2 - y_1*y_2}{m_1L_2}T_2 - y_1g &= 0 -\end{align} -$$ -```julia -function f(out, da, a, p, t) - L1, m1, L2, m2, g = p - - u1, v1, x1, y1, T1, - u2, v2, x2, y2, T2 = a - - du1, dv1, dx1, dy1, dT1, - du2, dv2, dx2, dy2, dT2 = da - - out[1] = x2*T2/(m2*L2) - du2 - out[2] = y2*T2/(m2*L2) - g - dv2 - out[3] = u2 - dx2 - out[4] = v2 - dy2 - out[5] = u2^2 + v2^2 -y2*g + T2/m2 - - out[6] = x1*T1/(m1*L1) - x2*T2/(m2*L2) - du1 - out[7] = y1*T1/(m1*L1) - g - y2*T2/(m2*L2) - dv1 - out[8] = u1 - dx1 - out[9] = v1 - dy1 - out[10] = u1^2 + v1^2 + T1/m1 + - (-x1*x2 - y1*y2)/(m1*L2)*T2 - y1*g - nothing -end - -# Release pendulum from top right -u0 = zeros(10) -u0[3] = 1.0 -u0[8] = 1.0 -du0 = zeros(10) -du0[2] = 9.8 -du0[7] = 9.8 - -p = [1,1,1,1,9.8] -tspan = (0.,100.) 
- -differential_vars = [true, true, true, true, false, - true, true, true, true, false] -prob = DAEProblem(f, du0, u0, tspan, p, differential_vars = differential_vars) -sol = solve(prob, IDA()) - -plot(sol, vars=(3,4)) -plot(sol, vars=(8,9)) -``` - -# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I) -## Part 1: Implementing the BRUSS PDE System as ODEs - -```julia -using OrdinaryDiffEq, Sundials, Plots - -# initial condition -function init_brusselator_2d(xyd) - N = length(xyd) - u = zeros(N, N, 2) - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - u[I,1] = 22*(y*(1-y))^(3/2) - u[I,2] = 27*(x*(1-x))^(3/2) - end - u -end - -N = 32 - -xyd_brusselator = range(0,stop=1,length=N) - -u0 = vec(init_brusselator_2d(xyd_brusselator)) - -tspan = (0, 22.) - -p = (3.4, 1., 10., xyd_brusselator) - -brusselator_f(x, y, t) = ifelse((((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) && - (t >= 1.1), 5., 0.) - - -using LinearAlgebra, SparseArrays -du = ones(N-1) -D2 = spdiagm(-1 => du, 0=>fill(-2.0, N), 1 => du) -D2[1, N] = D2[N, 1] = 1 -D2 = 1/step(xyd_brusselator)^2*D2 -tmp = Matrix{Float64}(undef, N, N) -function brusselator_2d_op(du, u, (D2, tmp, p), t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - α = α/dx^2 - du = reshape(du, N, N, 2) - u = reshape(u, N, N, 2) - @views for i in axes(u, 3) - ui = u[:, :, i] - dui = du[:, :, i] - mul!(tmp, D2, ui) - mul!(dui, ui, D2') - dui .+= tmp - end - - @inbounds begin - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - - du[i,j,1] = α*du[i,j,1] + B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) - du[i,j,2] = α*du[i,j,2] + A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] - end - end - nothing -end - -prob1 = ODEProblem(brusselator_2d_op, u0, tspan, (D2, tmp, p)) - -sol1 = @time solve(prob1, TRBDF2(autodiff=false)); -``` - -Visualizing the solution (works best in a terminal): -```julia -gr() -function plot_sol(sol) - off = N^2 - for t in 
sol.t[1]:0.1:sol.t[end] - solt = sol(t) - plt1 = surface(reshape(solt[1:off], N, N), zlims=(0, 5), leg=false) - surface!(plt1, reshape(solt[off+1:end], N, N), zlims=(0, 5), leg=false) - display(plt1) - sleep(0.05) - end - nothing -end - -plot_sol(sol1) -``` - - -## Part 2: Optimizing the BRUSS Code - -```julia -function brusselator_2d_loop(du, u, p, t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - α = α/dx^2 - limit = a -> let N=N - a == N+1 ? 1 : - a == 0 ? N : - a - end - II = LinearIndices((N, N, 2)) - - @inbounds begin - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - ip1 = limit(i+1) - im1 = limit(i-1) - jp1 = limit(j+1) - jm1 = limit(j-1) - - ii1 = II[i,j,1] - ii2 = II[i,j,2] - - du[II[i,j,1]] = α*(u[II[im1,j,1]] + u[II[ip1,j,1]] + u[II[i,jp1,1]] + u[II[i,jm1,1]] - 4u[ii1]) + - B + u[ii1]^2*u[ii2] - (A + 1)*u[ii1] + brusselator_f(x, y, t) - - du[II[i,j,2]] = α*(u[II[im1,j,2]] + u[II[ip1,j,2]] + u[II[i,jp1,2]] + u[II[i,jm1,2]] - 4u[II[i,j,2]]) + - A*u[ii1] - u[ii1]^2*u[ii2] - end - end - nothing -end - -prob2 = ODEProblem(brusselator_2d_loop, u0, tspan, p) - -sol2 = @time solve(prob2, TRBDF2()) -sol2_2 = @time solve(prob2, CVODE_BDF()) -``` - -## Part 3: Exploiting Jacobian Sparsity with Color Differentiation - -```julia -using SparseDiffTools - -sparsity_pattern = sparsity!(brusselator_2d_loop,similar(u0),u0,p,2.0) -jac_sp = sparse(sparsity_pattern) -jac = Float64.(jac_sp) -colors = matrix_colors(jac) -prob3 = ODEProblem(ODEFunction(brusselator_2d_loop, colorvec=colors,jac_prototype=jac_sp), u0, tspan, p) -sol3 = @time solve(prob3, TRBDF2()) -``` - -## (Optional) Part 4: Structured Jacobians - -## (Optional) Part 5: Automatic Symbolicification and Analytical Jacobian - -## Part 6: Utilizing Preconditioned-GMRES Linear Solvers - -```julia -using DiffEqOperators -using Sundials -using AlgebraicMultigrid: ruge_stuben, aspreconditioner, smoothed_aggregation -prob6 = ODEProblem(ODEFunction(brusselator_2d_loop, 
jac_prototype=JacVecOperator{Float64}(brusselator_2d_loop, u0)), u0, tspan, p) -II = Matrix{Float64}(I, N, N) -Op = kron(Matrix{Float64}(I, 2, 2), kron(D2, II) + kron(II, D2)) -Wapprox = -I+Op -#ml = ruge_stuben(Wapprox) -ml = smoothed_aggregation(Wapprox) -precond = aspreconditioner(ml) -sol_trbdf2 = @time solve(prob6, TRBDF2(linsolve=LinSolveGMRES())); # no preconditioner -sol_trbdf2 = @time solve(prob6, TRBDF2(linsolve=LinSolveGMRES(Pl=lu(Wapprox)))); # sparse LU -sol_trbdf2 = @time solve(prob6, TRBDF2(linsolve=LinSolveGMRES(Pl=precond))); # AMG -sol_cvodebdf = @time solve(prob2, CVODE_BDF(linear_solver=:GMRES)); -``` - -## Part 7: Exploring IMEX and Exponential Integrator Techniques (E) - -```julia -function laplacian2d(du, u, p, t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - du = reshape(du, N, N, 2) - u = reshape(u, N, N, 2) - @inbounds begin - α = α/dx^2 - limit = a -> let N=N - a == N+1 ? 1 : - a == 0 ? N : - a - end - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - ip1 = limit(i+1) - im1 = limit(i-1) - jp1 = limit(j+1) - jm1 = limit(j-1) - du[i,j,1] = α*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) - du[i,j,2] = α*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) - end - end - nothing -end -function brusselator_reaction(du, u, p, t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - du = reshape(du, N, N, 2) - u = reshape(u, N, N, 2) - @inbounds begin - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - du[i,j,1] = B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) - du[i,j,2] = A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] - end - end - nothing -end -prob7 = SplitODEProblem(laplacian2d, brusselator_reaction, u0, tspan, p) -sol7 = @time solve(prob7, KenCarp4()) -M = MatrixFreeOperator((du,u,p)->laplacian2d(du, u, p, 0), (p,), size=(2*N^2, 2*N^2), opnorm=1000) -prob7_2 = SplitODEProblem(M, brusselator_reaction, u0, tspan, p) 
-sol7_2 = @time solve(prob7_2, ETDRK4(krylov=true), dt=1) -prob7_3 = SplitODEProblem(DiffEqArrayOperator(Op), brusselator_reaction, u0, tspan, p) -sol7_3 = solve(prob7_3, KenCarp4()); -``` - -## Part 8: Work-Precision Diagrams for Benchmarking Solver Choices - -```julia -using DiffEqDevTools -abstols = 0.1 .^ (5:8) -reltols = 0.1 .^ (1:4) -sol = solve(prob3,CVODE_BDF(linear_solver=:GMRES),abstol=1/10^7,reltol=1/10^10) -test_sol = TestSolution(sol) -probs = [prob2, prob3, prob6] -setups = [Dict(:alg=>CVODE_BDF(),:prob_choice => 1), - Dict(:alg=>CVODE_BDF(linear_solver=:GMRES), :prob_choice => 1), - Dict(:alg=>TRBDF2(), :prob_choice => 1), - Dict(:alg=>TRBDF2(linsolve=LinSolveGMRES(Pl=precond)), :prob_choice => 3), - Dict(:alg=>TRBDF2(), :prob_choice => 2) - ] -labels = ["CVODE_BDF (dense)" "CVODE_BDF (GMRES)" "TRBDF2 (dense)" "TRBDF2 (sparse)" "TRBDF2 (GMRES)"] -wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=[test_sol,test_sol,test_sol],save_everystep=false,numruns=3, - names=labels, print_names=true, seconds=0.5) -plot(wp) -``` - -## Part 9: GPU-Parallelism for PDEs (E) - -## Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs - -# Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B) - -## Part 1: Implementing the Henon-Heiles System (B) - -```julia -function henon(dz,z,p,t) - p₁, p₂, q₁, q₂ = z[1], z[2], z[3], z[4] - dp₁ = -q₁*(1 + 2q₂) - dp₂ = -q₂-(q₁^2 - q₂^2) - dq₁ = p₁ - dq₂ = p₂ - - dz .= [dp₁, dp₂, dq₁, dq₂] - return nothing -end - -u₀ = [0.1, 0.0, 0.0, 0.5] -prob = ODEProblem(henon, u₀, (0., 1000.)) -sol = solve(prob, Vern9(), abstol=1e-14, reltol=1e-14) - -plot(sol, vars=[(3,4,1)], tspan=(0,100)) -``` - -## (Optional) Part 2: Alternative Dynamical Implmentations of Henon-Heiles (B) - -```julia -function henon(ddz,dz,z,p,t) - p₁, p₂ = dz[1], dz[2] - q₁, q₂ = z[1], z[2] - ddq₁ = -q₁*(1 + 2q₂) - ddq₂ = -q₂-(q₁^2 - q₂^2) - - ddz .= [ddq₁, ddq₂] -end - -p₀ = u₀[1:2] -q₀ = u₀[3:4] -prob2 = 
SecondOrderODEProblem(henon, p₀, q₀, (0., 1000.)) -sol = solve(prob2, DPRKN6(), abstol=1e-10, reltol=1e-10) - -plot(sol, vars=[(3,4)], tspan=(0,100)) - -H(p, q, params) = 1/2 * (p[1]^2 + p[2]^2) + 1/2 * (q[1]^2 + q[2]^2 + 2q[1]^2 * q[2] - 2/3*q[2]^3) - -prob3 = HamiltonianProblem(H, p₀, q₀, (0., 1000.)) -sol = solve(prob3, DPRKN6(), abstol=1e-10, reltol=1e-10) - -plot(sol, vars=[(3,4)], tspan=(0,100)) -``` - -## Part 3: Parallelized Ensemble Solving - -In order to solve with an ensamble we need some initial conditions. -```julia -function generate_ics(E,n) - # The hardcoded values bellow can be estimated by looking at the - # figures in the Henon-Heiles 1964 article - qrange = range(-0.4, stop = 1.0, length = n) - prange = range(-0.5, stop = 0.5, length = n) - z0 = Vector{Vector{typeof(E)}}() - for q in qrange - V = H([0,0],[0,q],nothing) - V ≥ E && continue - for p in prange - T = 1/2*p^2 - T + V ≥ E && continue - z = [√(2(E-V-T)), p, 0, q] - push!(z0, z) - end - end - return z0 -end - -z0 = generate_ics(0.125, 10) - -function prob_func(prob,i,repeat) - @. prob.u0 = z0[i] - prob -end - -ensprob = EnsembleProblem(prob, prob_func=prob_func) -sim = solve(ensprob, Vern9(), EnsembleThreads(), trajectories=length(z0)) - -plot(sim, vars=(3,4), tspan=(0,10)) -``` - -## Part 4: Parallelized GPU Ensemble Solving - -In order to use GPU parallelization we must make all inputs -(initial conditions, tspan, etc.) `Float32` and the function -definition should be in the in-place form, avoid bound checking and -return `nothing`. 
- -```julia -using DiffEqGPU - -function henon_gpu(dz,z,p,t) - @inbounds begin - dz[1] = -z[3]*(1 + 2z[4]) - dz[2] = -z[4]-(z[3]^2 - z[4]^2) - dz[3] = z[1] - dz[4] = z[2] - end - return nothing -end - -z0 = generate_ics(0.125f0, 50) -prob_gpu = ODEProblem(henon_gpu, Float32.(u₀), (0.f0, 1000.f0)) -ensprob = EnsembleProblem(prob_gpu, prob_func=prob_func) -sim = solve(ensprob, Tsit5(), EnsembleGPUArray(), trajectories=length(z0)) -``` -# Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I) - -## Part 1: Constructing and Training a Basic Neural ODE - -## Part 2: GPU-accelerating the Neural ODE Process - -## Part 3: Defining and Training a Mixed Neural ODE - -## Part 4: Constructing a Basic Neural SDE - -## Part 5: Optimizing the training behavior with minibatching (E) diff --git a/tutorials/introduction/01-ode_introduction.jmd b/tutorials/introduction/01-ode_introduction.jmd deleted file mode 100644 index 4aab979e..00000000 --- a/tutorials/introduction/01-ode_introduction.jmd +++ /dev/null @@ -1,395 +0,0 @@ ---- -title: An Intro to DifferentialEquations.jl -author: Chris Rackauckas ---- - -## Basic Introduction Via Ordinary Differential Equations - -This notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the [ODE tutorial](http://docs.juliadiffeq.org/dev/tutorials/ode_example.html). While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned. - -### Background - -If you are new to the study of differential equations, it can be helpful to do a quick background read on [the definition of ordinary differential equations](https://en.wikipedia.org/wiki/Ordinary_differential_equation). 
We define an ordinary differential equation as an equation which describes the way that a variable $u$ changes, that is - -$$u' = f(u,p,t)$$ - -where $p$ are the parameters of the model, $t$ is the time variable, and $f$ is the nonlinear model of how $u$ changes. The initial value problem also includes the information about the starting value: - -$$u(t_0) = u_0$$ - -Together, if you know the starting value and you know how the value will change with time, then you know what the value will be at any time point in the future. This is the intuitive definition of a differential equation. - -### First Model: Exponential Growth - -Our first model will be the canonical exponential growth model. This model says that the rate of change is proportional to the current value, and is this: - -$$u' = au$$ - -where we have a starting value $u(0)=u_0$. Let's say we put 1 dollar into Bitcoin which is increasing at a rate of $98\%$ per year. Then calling now $t=0$ and measuring time in years, our model is: - -$$u' = 0.98u$$ - -and $u(0) = 1.0$. We encode this into Julia by noticing that, in this setup, we match the general form when - -```julia -f(u,p,t) = 0.98u -``` - -with $ u_0 = 1.0 $. If we want to solve this model on a time span from `t=0.0` to `t=1.0`, then we define an `ODEProblem` by specifying this function `f`, this initial condition `u0`, and this time span as follows: - -```julia -using DifferentialEquations -f(u,p,t) = 0.98u -u0 = 1.0 -tspan = (0.0,1.0) -prob = ODEProblem(f,u0,tspan) -``` - -To solve our `ODEProblem` we use the command `solve`. - -```julia -sol = solve(prob) -``` - -and that's it: we have succesfully solved our first ODE! - -#### Analyzing the Solution - -Of course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the [Solution Handling](http://docs.juliadiffeq.org/dev/basics/solution.html) page. 
Here we will describe some of the basics. You can plot the solution using the plot recipe provided by [Plots.jl](http://docs.juliaplots.org/dev/): - -```julia -using Plots; gr() -plot(sol) -``` - -From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the [Plots.jl attributes](http://docs.juliaplots.org/dev/attributes/). For example: - -```julia -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", - xaxis="Time (t)",yaxis="u(t) (in μm)",label="My Thick Line!") # legend=false -``` - -Using the mutating `plot!` command we can add other pieces to our plot. For this ODE we know that the true solution is $u(t) = u_0 exp(at)$, so let's add some of the true solution to our plot: - -```julia -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") -``` - -In the previous command I demonstrated `sol.t`, which grabs the array of time points that the solution was saved at: - -```julia -sol.t -``` - -We can get the array of solution values using `sol.u`: - -```julia -sol.u -``` - -`sol.u[i]` is the value of the solution at time `sol.t[i]`. We can compute arrays of functions of the solution values using standard comprehensions, like: - -```julia -[t+u for (u,t) in tuples(sol)] -``` - -However, one interesting feature is that, by default, the solution is a continuous function. If we check the print out again: - -```julia -sol -``` - -you see that it says that the solution has a order changing interpolation. The default algorithm automatically switches between methods in order to handle all types of problems. For non-stiff equations (like the one we are solving), it is a continuous function of 4th order accuracy. We can call the solution as a function of time `sol(t)`. 
For example, to get the value at `t=0.45`, we can use the command: - -```julia -sol(0.45) -``` - -#### Controlling the Solver - -DifferentialEquations.jl has a common set of solver controls among its algorithms which can be found [at the Common Solver Options](http://docs.juliadiffeq.org/dev/basics/common_solver_opts.html) page. We will detail some of the most widely used options. - -The most useful options are the tolerances `abstol` and `reltol`. These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, `reltol` is the relative accuracy while `abstol` is the accuracy when `u` is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. Thus for the defaults `abstol=1e-6` and `reltol=1e-3`, you can expect a global accuracy of about 1-2 digits. If we want to get around 6 digits of accuracy, we can use the commands: - -```julia -sol = solve(prob,abstol=1e-8,reltol=1e-8) -``` - -Now we can see no visible difference against the true solution: - - -```julia -plot(sol) -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") -``` - -Notice that by decreasing the tolerance, the number of steps the solver had to take was `9` instead of the previous `5`. There is a trade off between accuracy and speed, and it is up to you to determine what is the right balance for your problem. - -Another common option is to use `saveat` to make the solver save at specific time points. For example, if we want the solution at an even grid of `t=0.1k` for integers `k`, we would use the command: - -```julia -sol = solve(prob,saveat=0.1) -``` - -Notice that when `saveat` is used the continuous output variables are no longer saved and thus `sol(t)`, the interpolation, is only first order. We can save at an uneven grid of points by passing a collection of values to `saveat`. 
For example: - -```julia -sol = solve(prob,saveat=[0.2,0.7,0.9]) -``` - -If we need to reduce the amount of saving, we can also turn off the continuous output directly via `dense=false`: - -```julia -sol = solve(prob,dense=false) -``` - -and to turn off all intermediate saving we can use `save_everystep=false`: - -```julia -sol = solve(prob,save_everystep=false) -``` - -If we want to solve and only save the final value, we can even set `save_start=false`. - -```julia -sol = solve(prob,save_everystep=false,save_start = false) -``` - -Note that similarly on the other side there is `save_end=false`. - -More advanced saving behaviors, such as saving functionals of the solution, are handled via the `SavingCallback` in the [Callback Library](http://docs.juliadiffeq.org/dev/features/callback_library.html#SavingCallback-1) which will be addressed later in the tutorial. - -#### Choosing Solver Algorithms - -There is no best algorithm for numerically solving a differential equation. When you call `solve(prob)`, DifferentialEquations.jl makes a guess at a good algorithm for your problem, given the properties that you ask for (the tolerances, the saving information, etc.). However, in many cases you may want more direct control. A later notebook will help introduce the various *algorithms* in DifferentialEquations.jl, but for now let's introduce the *syntax*. - -The most crucial determining factor in choosing a numerical method is the stiffness of the model. Stiffness is roughly characterized by a Jacobian `f` with large eigenvalues. That's quite mathematical, and we can think of it more intuitively: if you have big numbers in `f` (like parameters of order `1e5`), then it's probably stiff. Or, as the creator of the MATLAB ODE Suite, Lawrence Shampine, likes to define it, if the standard algorithms are slow, then it's stiff. 
We will go into more depth about diagnosing stiffness in a later tutorial, but for now note that if you believe your model may be stiff, you can hint this to the algorithm chooser via `alg_hints = [:stiff]`. - -```julia -sol = solve(prob,alg_hints=[:stiff]) -``` - -Stiff algorithms have to solve implicit equations and linear systems at each step so they should only be used when required. - -If we want to choose an algorithm directly, you can pass the algorithm type after the problem as `solve(prob,alg)`. For example, let's solve this problem using the `Tsit5()` algorithm, and just for show let's change the relative tolerance to `1e-6` at the same time: - -```julia -sol = solve(prob,Tsit5(),reltol=1e-6) -``` - -### Systems of ODEs: The Lorenz Equation - -Now let's move to a system of ODEs. The [Lorenz equation](https://en.wikipedia.org/wiki/Lorenz_system) is the famous "butterfly attractor" that spawned chaos theory. It is defined by the system of ODEs: - -$$ -\begin{align} -\frac{dx}{dt} &= \sigma (y - x)\\ -\frac{dy}{dt} &= x (\rho - z) -y\\ -\frac{dz}{dt} &= xy - \beta z -\end{align} -$$ - -To define a system of differential equations in DifferentialEquations.jl, we define our `f` as a vector function with a vector initial condition. Thus, for the vector `u = [x,y,z]'`, we have the derivative function: - -```julia -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -``` - -Notice here we used the in-place format which writes the output to the preallocated vector `du`. For systems of equations the in-place format is faster. We use the initial condition $u_0 = [1.0,0.0,0.0]$ as follows: - -```julia -u0 = [1.0,0.0,0.0] -``` - -Lastly, for this model we made use of the parameters `p`. We need to set this value in the `ODEProblem` as well. 
For our model we want to solve using the parameters $\sigma = 10$, $\rho = 28$, and $\beta = 8/3$, and thus we build the parameter collection: - -```julia -p = (10,28,8/3) # we could also make this an array, or any other type! -``` - -Now we generate the `ODEProblem` type. In this case, since we have parameters, we add the parameter values to the end of the constructor call. Let's solve this on a time span of `t=0` to `t=100`: - -```julia -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) -``` - -Now, just as before, we solve the problem: - -```julia -sol = solve(prob) -``` - -The same solution handling features apply to this case. Thus `sol.t` stores the time points and `sol.u` is an array storing the solution at the corresponding time points. - -However, there are a few extra features which are good to know when dealing with systems of equations. First of all, `sol` also acts like an array. `sol[i]` returns the solution at the `i`th time point. - -```julia -sol.t[10],sol[10] -``` - -Additionally, the solution acts like a matrix where `sol[j,i]` is the value of the `j`th variable at time `i`: - -```julia -sol[2,10] -``` - -We can get a real matrix by performing a conversion: - -```julia -A = Array(sol) -``` - -This is the same as sol, i.e. `sol[i,j] = A[i,j]`, but now it's a true matrix. Plotting will by default show the time series for each variable: - -```julia -plot(sol) -``` - -If we instead want to plot values against each other, we can use the `vars` command. Let's plot variable `1` against variable `2` against variable `3`: - -```julia -plot(sol,vars=(1,2,3)) -``` - -This is the classic Lorenz attractor plot, where the `x` axis is `u[1]`, the `y` axis is `u[2]`, and the `z` axis is `u[3]`. Note that the plot recipe by default uses the interpolation, but we can turn this off: - -```julia -plot(sol,vars=(1,2,3),denseplot=false) -``` - -Yikes! 
This shows how calculating the continuous solution has saved a lot of computational effort by computing only a sparse solution and filling in the values! Note that in vars, `0=time`, and thus we can plot the time series of a single component like: - -```julia -plot(sol,vars=(0,2)) -``` - -### A DSL for Parameterized Functions - -In many cases you may be defining a lot of functions with parameters. There exists the domain-specific language (DSL) defined by the `@ode_def` macro for helping with this common problem. For example, we can define the Lotka-Volterra equation: - -$$ -\begin{align} -\frac{dx}{dt} &= ax - bxy\\ -\frac{dy}{dt} &= -cy + dxy -\end{align} -$$ - -as follows: - -```julia -function lotka_volterra!(du,u,p,t) - du[1] = p[1]*u[1] - p[2]*u[1]*u[2] - du[2] = -p[3]*u[2] + p[4]*u[1]*u[2] -end -``` - -However, that can be hard to follow since there's a lot of "programming" getting in the way. Instead, you can use the `@ode_def` macro from ParameterizedFunctions.jl: - -```julia -using ParameterizedFunctions -lv! = @ode_def LotkaVolterra begin - dx = a*x - b*x*y - dy = -c*y + d*x*y -end a b c d -``` - -We can then use the result just like an ODE function from before: - -```julia -u0 = [1.0,1.0] -p = (1.5,1.0,3.0,1.0) -tspan = (0.0,10.0) -prob = ODEProblem(lv!,u0,tspan,p) -sol = solve(prob) -plot(sol) -``` - -Not only is the DSL convenient syntax, but it does some magic behind the scenes. For example, further parts of the tutorial will describe how solvers for stiff differential equations have to make use of the Jacobian in calculations. Here, the DSL uses symbolic differentiation to automatically derive that function: - -```julia -lv!.Jex -``` - -The DSL can derive many other functions; this ability is used to speed up the solvers. An extension to DifferentialEquations.jl, [Latexify.jl](https://korsbo.github.io/Latexify.jl/dev/tutorials/parameterizedfunctions.html), allows you to extract these pieces as LaTeX expressions. 
- -## Internal Types - -The last basic user-interface feature to explore is the choice of types. DifferentialEquations.jl respects your input types to determine the internal types that are used. Thus since in the previous cases, when we used `Float64` values for the initial condition, this meant that the internal values would be solved using `Float64`. We made sure that time was specified via `Float64` values, meaning that time steps would utilize 64-bit floats as well. But, by simply changing these types we can change what is used internally. - -As a quick example, let's say we want to solve an ODE defined by a matrix. To do this, we can simply use a matrix as input. - -```julia -A = [1. 0 0 -5 - 4 -2 4 -3 - -4 0 0 1 - 5 -2 2 3] -u0 = rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) -``` - -There is no real difference from what we did before, but now in this case `u0` is a `4x2` matrix. Because of that, the solution at each time point is matrix: - -```julia -sol[3] -``` - -In DifferentialEquations.jl, you can use any type that defines `+`, `-`, `*`, `/`, and has an appropriate `norm`. For example, if we want arbitrary precision floating point numbers, we can change the input to be a matrix of `BigFloat`: - -```julia -big_u0 = big.(u0) -``` - -and we can solve the `ODEProblem` with arbitrary precision numbers by using that initial condition: - -```julia -prob = ODEProblem(f,big_u0,tspan) -sol = solve(prob) -``` - -```julia -sol[1,3] -``` - -To really make use of this, we would want to change `abstol` and `reltol` to be small! Notice that the type for "time" is different than the type for the dependent variables, and this can be used to optimize the algorithm via keeping multiple precisions. 
We can convert time to be arbitrary precision as well by defining our time span with `BigFloat` variables: - -```julia -prob = ODEProblem(f,big_u0,big.(tspan)) -sol = solve(prob) -``` - -Let's end by showing a more complicated use of types. For small arrays, it's usually faster to do operations on static arrays via the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). The syntax is similar to that of normal arrays, but for these special arrays we utilize the `@SMatrix` macro to indicate we want to create a static array. - -```julia -using StaticArrays -A = @SMatrix [ 1.0 0.0 0.0 -5.0 - 4.0 -2.0 4.0 -3.0 - -4.0 0.0 0.0 1.0 - 5.0 -2.0 2.0 3.0] -u0 = @SMatrix rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) -``` - -```julia -sol[3] -``` - -## Conclusion - -These are the basic controls in DifferentialEquations.jl. All equations are defined via a problem type, and the `solve` command is used with an algorithm choice (or the default) to get a solution. Every solution acts the same, like an array `sol[i]` with `sol.t[i]`, and also like a continuous function `sol(t)` with a nice plot command `plot(sol)`. The Common Solver Options can be used to control the solver for any equation type. Lastly, the types used in the numerical solving are determined by the input types, and this can be used to solve with arbitrary precision and add additional optimizations (this can be used to solve via GPUs for example!). While this was shown on ODEs, these techniques generalize to other types of equations as well. 
- -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/02-choosing_algs.jmd b/tutorials/introduction/02-choosing_algs.jmd deleted file mode 100644 index c473bea0..00000000 --- a/tutorials/introduction/02-choosing_algs.jmd +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Choosing an ODE Algorithm -author: Chris Rackauckas ---- - -While the default algorithms, along with `alg_hints = [:stiff]`, will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the [ODE Solvers](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html) page which goes into more depth. - -## Diagnosing Stiffness - -One of the key things to know for algorithm choices is whether your problem is stiff. Let's take for example the driven Van Der Pol equation: - -```julia -using DifferentialEquations, ParameterizedFunctions -van! = @ode_def VanDerPol begin - dy = μ*((1-x^2)*y - x) - dx = 1*y -end μ - -prob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6) -``` - -One indicating factor that should alert you to the fact that this model may be stiff is the fact that the parameter is `1e6`: large parameters generally mean stiff models. If we try to solve this with the default method: - -```julia -sol = solve(prob,Tsit5()) -``` - -Here it shows that maximum iterations were reached. Another thing that can happen is that the solution can return that the solver was unstable (exploded to infinity) or that `dt` became too small. If these happen, the first thing to do is to check that your model is correct. It could very well be that you made an error that causes the model to be unstable! - -If the model is the problem, then stiffness could be the reason. 
We can thus hint to the solver to use an appropriate method: - -```julia -sol = solve(prob,alg_hints = [:stiff]) -``` - -Or we can use the default algorithm. By default, DifferentialEquations.jl uses algorithms like `AutoTsit5(Rodas5())` which automatically detect stiffness and switch to an appropriate method once stiffness is known. - -```julia -sol = solve(prob) -``` - -Another way to understand stiffness is to look at the solution. - -```julia -using Plots; gr() -sol = solve(prob,alg_hints = [:stiff],reltol=1e-6) -plot(sol,denseplot=false) -``` - -Let's zoom in on the y-axis to see what's going on: - -```julia -plot(sol,ylims = (-10.0,10.0)) -``` - -Notice how there are some extreme vertical shifts that occur. These vertical shifts are places where the derivative term is very large, and this is indicative of stiffness. This is an extreme example to highlight the behavior, but this general idea can be carried over to your problem. When in doubt, simply try timing using both a stiff solver and a non-stiff solver and see which is more efficient. - -To try this out, let's use BenchmarkTools, a package that let's us relatively reliably time code blocks. - -```julia -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -u0 = [1.0,0.0,0.0] -p = (10,28,8/3) -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) -``` - -And now, let's use the `@btime` macro from benchmark tools to compare the use of non-stiff and stiff solvers on this problem. - -```julia -using BenchmarkTools -@btime solve(prob); -``` - -```julia -@btime solve(prob,alg_hints = [:stiff]); -``` - -In this particular case, we can see that non-stiff solvers get us to the solution much more quickly. 
- -## The Recommended Methods - -When picking a method, the general rules are as follows: - -- Higher order is more efficient at lower tolerances, lower order is more efficient at higher tolerances -- Adaptivity is essential in most real-world scenarios -- Runge-Kutta methods do well with non-stiff equations, Rosenbrock methods do well with small stiff equations, BDF methods do well with large stiff equations - -While there are always exceptions to the rule, those are good guiding principles. Based on those, a simple way to choose methods is: - -- The default is `Tsit5()`, a non-stiff Runge-Kutta method of Order 5 -- If you use low tolerances (`1e-8`), try `Vern7()` or `Vern9()` -- If you use high tolerances, try `BS3()` -- If the problem is stiff, try `Rosenbrock23()`, `Rodas5()`, or `CVODE_BDF()` -- If you don't know, use `AutoTsit5(Rosenbrock23())` or `AutoVern9(Rodas5())`. - -(This is a simplified version of the default algorithm chooser) - -## Comparison to other Software - -If you are familiar with MATLAB, SciPy, or R's DESolve, here's a quick translation start to have transfer your knowledge over. 
- -- `ode23` -> `BS3()` -- `ode45`/`dopri5` -> `DP5()`, though in most cases `Tsit5()` is more efficient -- `ode23s` -> `Rosenbrock23()`, though in most cases `Rodas4()` is more efficient -- `ode113` -> `VCABM()`, though in many cases `Vern7()` is more efficient -- `dop853` -> `DP8()`, though in most cases `Vern7()` is more efficient -- `ode15s`/`vode` -> `QNDF()`, though in many cases `CVODE_BDF()`, `Rodas4()` - or `radau()` are more efficient -- `ode23t` -> `Trapezoid()` for efficiency and `GenericTrapezoid()` for robustness -- `ode23tb` -> `TRBDF2` -- `lsoda` -> `lsoda()` (requires `]add LSODA; using LSODA`) -- `ode15i` -> `IDA()`, though in many cases `Rodas4()` can handle the DAE and is - significantly more efficient - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/03-optimizing_diffeq_code.jmd b/tutorials/introduction/03-optimizing_diffeq_code.jmd deleted file mode 100644 index 6a9971d7..00000000 --- a/tutorials/introduction/03-optimizing_diffeq_code.jmd +++ /dev/null @@ -1,492 +0,0 @@ ---- -title: Optimizing DiffEq Code -author: Chris Rackauckas ---- - -In this notebook we will walk through some of the main tools for optimizing your code in order to efficiently solve DifferentialEquations.jl. User-side optimizations are important because, for sufficiently difficult problems, most of the time will be spent inside of your `f` function, the function you are trying to solve. "Efficient" integrators are those that reduce the required number of `f` calls to hit the error tolerance. 
The main ideas for optimizing your DiffEq code, or any Julia function, are the following: - -- Make it non-allocating -- Use StaticArrays for small arrays -- Use broadcast fusion -- Make it type-stable -- Reduce redundant calculations -- Make use of BLAS calls -- Optimize algorithm choice - -We'll discuss these strategies in the context of small and large systems. Let's start with small systems. - -## Optimizing Small Systems (<100 DEs) - -Let's take the classic Lorenz system from before. Let's start by naively writing the system in its out-of-place form: - -```julia -function lorenz(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - [dx,dy,dz] -end -``` - -Here, `lorenz` returns an object, `[dx,dy,dz]`, which is created within the body of `lorenz`. - -This is a common code pattern from high-level languages like MATLAB, SciPy, or R's deSolve. However, the issue with this form is that it allocates a vector, `[dx,dy,dz]`, at each step. Let's benchmark the solution process with this choice of function: - -```julia -using DifferentialEquations, BenchmarkTools -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -The BenchmarkTools package's `@benchmark` runs the code multiple times to get an accurate measurement. The minimum time is the time it takes when your OS and other background processes aren't getting in the way. Notice that in this case it takes about 5ms to solve and allocates around 11.11 MiB. However, if we were to use this inside of a real user code we'd see a lot of time spent doing garbage collection (GC) to clean up all of the arrays we made. Even if we turn off saving we have these allocations. - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -The problem of course is that arrays are created every time our derivative function is called. 
This function is called multiple times per step and is thus the main source of memory usage. To fix this, we can use the in-place form to ***make our code non-allocating***: - -```julia -function lorenz!(du,u,p,t) - du[1] = 10.0*(u[2]-u[1]) - du[2] = u[1]*(28.0-u[3]) - u[2] - du[3] = u[1]*u[2] - (8/3)*u[3] -end -``` - -Here, instead of creating an array each time, we utilized the cache array `du`. When the inplace form is used, DifferentialEquations.jl takes a different internal route that minimizes the internal allocations as well. When we benchmark this function, we will see quite a difference. - -```julia -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -There is a 4x time difference just from that change! Notice there are still some allocations and this is due to the construction of the integration cache. But this doesn't scale with the problem size: - -```julia -tspan = (0.0,500.0) # 5x longer than before -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -since that's all just setup allocations. - -#### But if the system is small we can optimize even more. - -Allocations are only expensive if they are "heap allocations". For a more in-depth definition of heap allocations, [there are a lot of sources online](http://net-informations.com/faq/net/stack-heap.htm). But a good working definition is that heap allocations are variable-sized slabs of memory which have to be pointed to, and this pointer indirection costs time. Additionally, the heap has to be managed and the garbage controllers has to actively keep track of what's on the heap. - -However, there's an alternative to heap allocations, known as stack allocations. The stack is statically-sized (known at compile time) and thus its accesses are quick. 
Additionally, the exact block of memory is known in advance by the compiler, and thus re-using the memory is cheap. This means that allocating on the stack has essentially no cost! - -Arrays have to be heap allocated because their size (and thus the amount of memory they take up) is determined at runtime. But there are structures in Julia which are stack-allocated. `struct`s for example are stack-allocated "value-type"s. `Tuple`s are a stack-allocated collection. The most useful data structure for DiffEq though is the `StaticArray` from the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). These arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example: - -```julia -using StaticArrays -A = @SVector [2.0,3.0,5.0] -``` - -Notice that the `3` after `SVector` gives the size of the `SVector`. It cannot be changed. Additionally, `SVector`s are immutable, so we have to create a new `SVector` to change values. But remember, we don't have to worry about allocations because this data structure is stack-allocated. `SArray`s have a lot of extra optimizations as well: they have fast matrix multiplication, fast QR factorizations, etc. which directly make use of the information about the size of the array. Thus, when possible they should be used. - -Unfortunately static arrays can only be used for sufficiently small arrays. After a certain size, they are forced to heap allocate after some instructions and their compile time balloons. Thus static arrays shouldn't be used if your system has more than 100 variables. Additionally, only the native Julia algorithms can fully utilize static arrays. - -Let's ***optimize `lorenz` using static arrays***. 
Note that in this case, we want to use the out-of-place allocating form, but this time we want to output a static array: - -```julia -function lorenz_static(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - @SVector [dx,dy,dz] -end -``` - -To make the solver internally use static arrays, we simply give it a static array as the initial condition: - -```julia -u0 = @SVector [1.0,0.0,0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz_static,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -And that's pretty much all there is to it. With static arrays you don't have to worry about allocating, so use operations like `*` and don't worry about fusing operations (discussed in the next section). Do "the vectorized code" of R/MATLAB/Python and your code in this case will be fast, or directly use the numbers/values. - -#### Exercise 1 - -Implement the out-of-place array, in-place array, and out-of-place static array forms for the [Henon-Heiles System](https://en.wikipedia.org/wiki/H%C3%A9non%E2%80%93Heiles_system) and time the results. - -## Optimizing Large Systems - -### Interlude: Managing Allocations with Broadcast Fusion - -When your system is sufficiently large, or you have to make use of a non-native Julia algorithm, you have to make use of `Array`s. In order to use arrays in the most efficient manner, you need to be careful about temporary allocations. Vectorized calculations naturally have plenty of temporary array allocations. This is because a vectorized calculation outputs a vector. Thus: - -```julia -A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000) -test(A,B,C) = A + B + C -@benchmark test(A,B,C) -``` -That expression `A + B + C` creates 2 arrays. It first creates one for the output of `A + B`, then uses that result array to `+ C` to get the final result. 2 arrays! We don't want that! 
The first thing to do to fix this is to use broadcast fusion. [Broadcast fusion](https://julialang.org/blog/2017/01/moredots) puts expressions together. For example, instead of doing the `+` operations separately, if we were to add them all at the same time, then we would only have a single array that's created. For example: - -```julia -test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C) -@benchmark test2(A,B,C) -``` - -Puts the whole expression into a single function call, and thus only one array is required to store output. This is the same as writing the loop: - -```julia -function test3(A,B,C) - D = similar(A) - @inbounds for i in eachindex(A) - D[i] = A[i] + B[i] + C[i] - end - D -end -@benchmark test3(A,B,C) -``` - -However, Julia's broadcast is syntactic sugar for this. If multiple expressions have a `.`, then it will put those vectorized operations together. Thus: - -```julia -test4(A,B,C) = A .+ B .+ C -@benchmark test4(A,B,C) -``` - -is a version with only 1 array created (the output). Note that `.`s can be used with function calls as well: - -```julia -sin.(A) .+ sin.(B) -``` - -Also, the `@.` macro applys a dot to every operator: - -```julia -test5(A,B,C) = @. A + B + C #only one array allocated -@benchmark test5(A,B,C) -``` - -Using these tools we can get rid of our intermediate array allocations for many vectorized function calls. But we are still allocating the output array. To get rid of that allocation, we can instead use mutation. Mutating broadcast is done via `.=`. For example, if we pre-allocate the output: - -```julia -D = zeros(1000,1000); -``` - -Then we can keep re-using this cache for subsequent calculations. The mutating broadcasting form is: - -```julia -test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated -@benchmark test6!(D,A,B,C) -``` - -If we use `@.` before the `=`, then it will turn it into `.=`: - -```julia -test7!(D,A,B,C) = @. 
D = A + B + C #only one array allocated -@benchmark test7!(D,A,B,C) -``` - -Notice that in this case, there is no "output", and instead the values inside of `D` are what are changed (like with the DiffEq inplace function). Many Julia functions have a mutating form which is denoted with a `!`. For example, the mutating form of the `map` is `map!`: - -```julia -test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C) -@benchmark test8!(D,A,B,C) -``` - -Some operations require using an alternate mutating form in order to be fast. For example, matrix multiplication via `*` allocates a temporary: - -```julia -@benchmark A*B -``` - -Instead, we can use the mutating form `mul!` into a cache array to avoid allocating the output: - -```julia -using LinearAlgebra -@benchmark mul!(D,A,B) # same as D = A * B -``` - -For repeated calculations this reduced allocation can stop GC cycles and thus lead to more efficient code. Additionally, ***we can fuse together higher level linear algebra operations using BLAS***. The package [SugarBLAS.jl](https://github.com/lopezm94/SugarBLAS.jl) makes it easy to write higher level operations like `alpha*B*A + beta*C` as mutating BLAS calls. - -### Example Optimization: Gierer-Meinhardt Reaction-Diffusion PDE Discretization - -Let's optimize the solution of a Reaction-Diffusion PDE's discretization. In its discretized form, this is the ODE: - -$$ -\begin{align} -du &= D_1 (A_y u + u A_x) + \frac{au^2}{v} + \bar{u} - \alpha u\\ -dv &= D_2 (A_y v + v A_x) + a u^2 + \beta v -\end{align} -$$ - -where $u$, $v$, and $A$ are matrices. Here, we will use the simplified version where $A$ is the tridiagonal stencil $[1,-2,1]$, i.e. it's the 2D discretization of the LaPlacian. 
The native code would be something along the lines of: - -```julia -# Generate the constants -p = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2 -N = 100 -Ax = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1])) -Ay = copy(Ax) -Ax[2,1] = 2.0 -Ax[end-1,end] = 2.0 -Ay[1,2] = 2.0 -Ay[end,end-1] = 2.0 - -function basic_version!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = r[:,:,1] - v = r[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u - dr[:,:,2] = Dv .+ a.*u.*u .- β*v -end - -a,α,ubar,β,D1,D2 = p -uss = (ubar+β)/α -vss = (a/β)*uss^2 -r0 = zeros(100,100,2) -r0[:,:,1] .= uss.+0.1.*rand.() -r0[:,:,2] .= vss - -prob = ODEProblem(basic_version!,r0,(0.0,0.1),p) -``` - -In this version we have encoded our initial condition to be a 3-dimensional array, with `u[:,:,1]` being the `A` part and `u[:,:,2]` being the `B` part. - -```julia -@benchmark solve(prob,Tsit5()) -``` - -While this version isn't very efficient, - -#### We recommend writing the "high-level" code first, and iteratively optimizing it! - -The first thing that we can do is get rid of the slicing allocations. The operation `r[:,:,1]` creates a temporary array instead of a "view", i.e. a pointer to the already existing memory. To make it a view, add `@view`. Note that we have to be careful with views because they point to the same memory, and thus changing a view changes the original values: - -```julia -A = rand(4) -@show A -B = @view A[1:3] -B[2] = 2 -@show A -``` - -Notice that changing `B` changed `A`. This is something to be careful of, but at the same time we want to use this since we want to modify the output `dr`. Additionally, the last statement is a purely element-wise operation, and thus we can make use of broadcast fusion there. 
Let's rewrite `basic_version!` to ***avoid slicing allocations*** and to ***use broadcast fusion***: - -```julia -function gm2!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - @. du = Du + a.*u.*u./v + ubar - α*u - @. dv = Dv + a.*u.*u - β*v -end -prob = ODEProblem(gm2!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -Now, most of the allocations are taking place in `Du = D1*(Ay*u + u*Ax)` since those operations are vectorized and not mutating. We should instead replace the matrix multiplications with `mul!`. When doing so, we will need to have cache variables to write into. This looks like: - -```julia -Ayu = zeros(N,N) -uAx = zeros(N,N) -Du = zeros(N,N) -Ayv = zeros(N,N) -vAx = zeros(N,N) -Dv = zeros(N,N) -function gm3!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm3!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -But our temporary variables are global variables. We need to either declare the caches as `const` or localize them. We can localize them by adding them to the parameters, `p`. It's easier for the compiler to reason about local variables than global variables. ***Localizing variables helps to ensure type stability***. - -```julia -p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2 -function gm4!(dr,r,p,t) - a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. 
dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm4!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -We could then use the BLAS `gemmv` to optimize the matrix multiplications some more, but instead let's devectorize the stencil. - -```julia -p = (1.0,1.0,1.0,10.0,0.001,100.0,N) -function fast_gm!(du,u,p,t) - a,α,ubar,β,D1,D2,N = p - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for j in 2:N-1 - i = 1 - du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = 1 - du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for i in 2:N-1 - j = 1 - du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = 1 - du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds begin - i = 1; j = 1 - du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - 
α*u[i,j,1] - du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = 1; j = N - du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = 1 - du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = N - du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end -end -prob = ODEProblem(fast_gm!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -Lastly, we can do other things like multithread the main loops, but these optimizations get the last 2x-3x out. The main optimizations which apply everywhere are the ones we just performed (though the last one only works if your matrix is a stencil. This is known as a matrix-free implementation of the PDE discretization). - -This gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code! - -The last thing to do is then ***optimize our algorithm choice***. We have been using `Tsit5()` as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use `CVODE_BDF()`. However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. 
`CVODE_BDF` allows us to use a sparse Newton-Krylov solver by setting `linear_solver = :GMRES` (see [the solver documentation](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html#Sundials.jl-1), and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time. - -```julia -prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p) -@benchmark solve(prob,Tsit5()) -``` - -```julia -using Sundials -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) -``` - -```julia -prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p) -# Will go out of memory if we don't turn off `save_everystep`! -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -```julia -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) -``` - -Now let's check the allocation growth. - -```julia -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) -``` - -```julia -prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p) -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) -``` - -Notice that we've elimated almost all allocations, allowing the code to grow without hitting garbage collection and slowing down. - -Why is `CVODE_BDF` doing well? What's happening is that, because the problem is stiff, the number of steps required by the explicit Runge-Kutta method grows rapidly, whereas `CVODE_BDF` is taking large steps. Additionally, the `GMRES` linear solver form is quite an efficient way to solve the implicit system in this case. This is problem-dependent, and in many cases using a Krylov method effectively requires a preconditioner, so you need to play around with testing other algorithms and linear solvers to find out what works best with your problem. - -## Conclusion - -Julia gives you the tools to optimize the solver "all the way", but you need to make use of it. The main thing to avoid is temporary allocations. For small systems, this is effectively done via static arrays. 
For large systems, this is done via in-place operations and cache arrays. Either way, the resulting solution can be immensely sped up over vectorized formulations by using these principles. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/04-callbacks_and_events.jmd b/tutorials/introduction/04-callbacks_and_events.jmd deleted file mode 100644 index 810d7b00..00000000 --- a/tutorials/introduction/04-callbacks_and_events.jmd +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: Callbacks and Events -author: Chris Rackauckas ---- - -In working with a differential equation, our system will evolve through many states. Particular states of the system may be of interest to us, and we say that an ***"event"*** is triggered when our system reaches these states. For example, events may include the moment when our system reaches a particular temperature or velocity. We ***handle*** these events with ***callbacks***, which tell us what to do once an event has been triggered. - -These callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserve conservation laws and save the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers. - -This tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the [Event Handling and Callback Functions](http://docs.juliadiffeq.org/dev/features/callback_functions.html) page of the documentation. We will also introduce you to some of the most widely used callbacks in the [Callback Library](http://docs.juliadiffeq.org/dev/features/callback_library.html), which is a library of pre-built mods. 
- -## Events and Continuous Callbacks - -Event handling is done through continuous callbacks. Callbacks take a function, `condition`, which triggers an `affect!` when `condition == 0`. These callbacks are called "continuous" because they will utilize rootfinding on the interpolation to find the "exact" time point at which the condition takes place and apply the `affect!` at that time point. - -***Let's use a bouncing ball as a simple system to explain events and callbacks.*** Let's take Newton's model of a ball falling towards the Earth's surface via a gravitational constant `g`. In this case, the velocity is changing via `-g`, and position is changing via the velocity. Therefore we receive the system of ODEs: - -```julia -using DifferentialEquations, ParameterizedFunctions -ball! = @ode_def BallBounce begin - dy = v - dv = -g -end g -``` - -We want the callback to trigger when `y=0` since that's when the ball will hit the Earth's surface (our event). We do this with the condition: - -```julia -function condition(u,t,integrator) - u[1] -end -``` - -Recall that the `condition` will trigger when it evaluates to zero, and here it will evaluate to zero when `u[1] == 0`, which occurs when `v == 0`. *Now we have to say what we want the callback to do.* Callbacks make use of the [Integrator Interface](http://docs.juliadiffeq.org/dev/basics/integrator.html). Instead of giving a full description, a quick and usable rundown is: - -- Values are strored in `integrator.u` -- Times are stored in `integrator.t` -- The parameters are stored in `integrator.p` -- `integrator(t)` performs an interpolation in the current interval between `integrator.tprev` and `integrator.t` (and allows extrapolation) -- User-defined options (tolerances, etc.) are stored in `integrator.opts` -- `integrator.sol` is the current solution object. Note that `integrator.sol.prob` is the current problem - -While there's a lot more on the integrator interface page, that's a working knowledge of what's there. 
- -What we want to do with our `affect!` is to "make the ball bounce". Mathematically speaking, the ball bounces when the sign of the velocity flips. As an added behavior, let's also use a small friction constant to dampen the ball's velocity. This way only a percentage of the velocity will be retained when the event is triggered and the callback is used. We'll define this behavior in the `affect!` function: - -```julia -function affect!(integrator) - integrator.u[2] = -integrator.p[2] * integrator.u[2] -end -``` - -`integrator.u[2]` is the second value of our model, which is `v` or velocity, and `integrator.p[2]`, is our friction coefficient. - -Therefore `affect!` can be read as follows: `affect!` will take the current value of velocity, and multiply it `-1` multiplied by our friction coefficient. Therefore the ball will change direction and its velocity will dampen when `affect!` is called. - -Now let's build the `ContinuousCallback`: - -```julia -bounce_cb = ContinuousCallback(condition,affect!) -``` - -Now let's make an `ODEProblem` which has our callback: - -```julia -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb) -``` - -Notice that we chose a friction constant of `0.9`. Now we can solve the problem and plot the solution as we normally would: - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -and tada, the ball bounces! Notice that the `ContinuousCallback` is using the interpolation to apply the effect "exactly" when `v == 0`. This is crucial for model correctness, and thus when this property is needed a `ContinuousCallback` should be used. - -#### Exercise 1 - -In our example we used a constant coefficient of friction, but if we are bouncing the ball in the same place we may be smoothing the surface (say, squishing the grass), causing there to be less friction after each bounce. 
In this more advanced model, we want the friction coefficient at the next bounce to be `sqrt(friction)` from the previous bounce (since `friction < 1`, `sqrt(friction) > friction` and `sqrt(friction) < 1`). - -Hint: there are many ways to implement this. One way to do it is to make `p` a `Vector` and mutate the friction coefficient in the `affect!`. - -## Discrete Callbacks - -A discrete callback checks a `condition` after every integration step and, if true, it will apply an `affect!`. For example, let's say that at time `t=2` we want to include that a kid kicked the ball, adding `20` to the current velocity. This kind of situation, where we want to add a specific behavior which does not require rootfinding, is a good candidate for a `DiscreteCallback`. In this case, the `condition` is a boolean for whether to apply the `affect!`, so: - -```julia -function condition_kick(u,t,integrator) - t == 2 -end -``` - -We want the kick to occur at `t=2`, so we check for that time point. When we are at this time point, we want to do: - -```julia -function affect_kick!(integrator) - integrator.u[2] += 50 -end -``` - -Now we build the problem as before: - -```julia -kick_cb = DiscreteCallback(condition_kick,affect_kick!) -u0 = [50.0,0.0] -tspan = (0.0,10.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb) -``` - -Note that, since we are requiring our effect at exactly the time `t=2`, we need to tell the integration scheme to step at exactly `t=2` to apply this callback. This is done via the option `tstops`, which is like `saveat` but means "stop at these values". - -```julia -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) -``` - -Note that this example could've been done with a `ContinuousCallback` by checking the condition `t-2`. - -## Merging Callbacks with Callback Sets - -In some cases you may want to merge callbacks to build up more complex behavior. In our previous result, notice that the model is unphysical because the ball goes below zero! 
What we really need to do is add the bounce callback together with the kick. This can be achieved through the `CallbackSet`. - -```julia -cb = CallbackSet(bounce_cb,kick_cb) -``` - -A `CallbackSet` merges their behavior together. The logic is as follows. In a given interval, if there are multiple continuous callbacks that would trigger, only the one that triggers at the earliest time is used. The time is pulled back to where that continuous callback is triggered, and then the `DiscreteCallback`s in the callback set are called in order. - -```julia -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=cb) -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) -``` - -Notice that we have now merged the behaviors. We can then nest this as deep as we like. - -#### Exercise 2 - -Add to the model a linear wind with resistance that changes the acceleration to `-g + k*v` after `t=10`. Do so by adding another parameter and allowing it to be zero until a specific time point where a third callback triggers the change. - -## Integration Termination and Directional Handling - -Let's look at another model now: the model of the [Harmonic Oscillator](https://en.wikipedia.org/wiki/Harmonic_oscillator). We can write this as: - -```julia -u0 = [1.,0.] -harmonic! = @ode_def HarmonicOscillator begin - dv = -x - dx = v -end -tspan = (0.0,10.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -plot(sol) -``` - -Let's instead stop the integration when a condition is met. From the [Integrator Interface stepping controls](http://docs.juliadiffeq.org/dev/basics/integrator.html#Stepping-Controls-1) we see that `terminate!(integrator)` will cause the integration to end. So our new `affect!` is simply: - -```julia -function terminate_affect!(integrator) - terminate!(integrator) -end -``` - -Let's first stop the integration when the particle moves back to `x=0`. 
This means we want to use the condition: - -```julia -function terminate_condition(u,t,integrator) - u[2] -end -terminate_cb = ContinuousCallback(terminate_condition,terminate_affect!) -``` - -Note that instead of adding callbacks to the problem, we can also add them to the `solve` command. This will automatically form a `CallbackSet` with any problem-related callbacks and naturally allows you to distinguish between model features and integration controls. - -```julia -sol = solve(prob,callback=terminate_cb) -plot(sol) -``` - -Notice that the harmonic oscillator's true solution here is `sin` and `cos`, and thus we would expect this return to zero to happen at `t=π`: - -```julia -sol.t[end] -``` - -This is one way to approximate π! Lower tolerances and arbitrary precision numbers can make this more exact, but let's not look at that. Instead, what if we wanted to halt the integration after exactly one cycle? To do so we would need to ignore the first zero-crossing. Luckily in these types of scenarios there's usually a structure to the problem that can be exploited. Here, we only want to trigger the `affect!` when crossing from negative to positive, and not when crossing from positive to negative. In other words, we want our `affect!` to only occur on upcrossings. - -If the `ContinuousCallback` constructor is given a single `affect!`, it will occur on both upcrossings and downcrossings. If there are two `affect!`s given, then the first is for upcrossings and the second is for downcrossings. An `affect!` can be ignored by using `nothing`. Together, the "upcrossing-only" version of the effect means that the first `affect!` is what we defined above and the second is `nothing`. 
Therefore we want: - -```julia -terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing) -``` - -Which gives us: - -```julia -sol = solve(prob,callback=terminate_upcrossing_cb) -plot(sol) -``` - -## Callback Library - -As you can see, callbacks can be very useful and through `CallbackSets` we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the [Callback Library](http://docs.juliadiffeq.org/dev/features/callback_library.html). We will walk through a few examples where these callbacks can come in handy. - -### Manifold Projection - -One callback is the manifold projection callback. Essentially, you can define any manifold `g(sol)=0` which the solution must live on, and cause the integration to project to that manifold after every step. As an example, let's see what happens if we naively run the harmonic oscillator for a long time: - -```julia -tspan = (0.0,10000.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -gr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points! -plot(sol,vars=(1,2)) -``` - -```julia -plot(sol,vars=(0,1),denseplot=false) -``` - -Notice that what's going on is that the numerical solution is drifting from the true solution over this long time scale. This is because the integrator is not conserving energy. - -```julia -plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2 -``` - -Some integration techniques like [symplectic integrators](http://docs.juliadiffeq.org/dev/solvers/dynamical_solve.html#Symplectic-Integrators-1) are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. 
To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is: - -```julia -function g(resid,u,p,t) - resid[1] = u[2]^2 + u[1]^2 - 1 - resid[2] = 0 -end -``` - -Here the residual measures how far from our desired energy we are, and the number of conditions matches the size of our system (we ignored the second one by making the residual 0). Thus we define a `ManifoldProjection` callback and add that to the solver: - -```julia -cb = ManifoldProjection(g) -sol = solve(prob,callback=cb) -plot(sol,vars=(1,2)) -``` - -```julia -plot(sol,vars=(0,1),denseplot=false) -``` - -Now we have "perfect" energy conservation, where if it's ever violated too much the solution will get projected back to `energy=1`. - -```julia -u1,u2 = sol[500] -u2^2 + u1^2 -``` - -While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the [`PositiveCallback()`](http://docs.juliadiffeq.org/dev/features/callback_library.html#PositiveDomain-1) which can be used to enforce positivity of the variables. - -### SavingCallback - -The `SavingCallback` can be used to allow for special saving behavior. Let's take a linear ODE defined on a system of 1000x1000 matrices: - -```julia -prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0)) -``` - -In fields like quantum mechanics you may only want to know specific properties of the solution such as the trace or the norm of the matrix. Saving all of the 1000x1000 matrices can be a costly way to get this information! Instead, we can use the `SavingCallback` to save the `trace` and `norm` at specified times. To do so, we first define our `SavedValues` cache. 
Our time is in terms of `Float64`, and we want to save tuples of `Float64`s (one for the `trace` and one for the `norm`), and thus we generate the cache as: - -```julia -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) -``` - -Now we define the `SavingCallback` by giving it a function of `(u,t,integrator)` that returns the values to save, and the cache: - -```julia -using LinearAlgebra -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values) -``` - -Here we take `u` and save `(tr(u),norm(u))`. When we solve with this callback: - -```julia -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving -``` - -Our values are stored in our `saved_values` variable: - -```julia -saved_values.t -``` - -```julia -saved_values.saveval -``` - -By default this happened only at the solver's steps. But the `SavingCallback` has similar controls as the integrator. For example, if we want to save at every `0.1` seconds, we can do so using `saveat`: - -```julia -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0) -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving -``` - -```julia -saved_values.t -``` - -```julia -saved_values.saveval -``` - -#### Exercise 3 - -Go back to the Harmonic oscillator. Use the `SavingCallback` to save an array for the energy over time, and do this both with and without the `ManifoldProjection`. Plot the results to see the difference the projection makes. 
- -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/05-formatting_plots.jmd b/tutorials/introduction/05-formatting_plots.jmd deleted file mode 100644 index 444e9c04..00000000 --- a/tutorials/introduction/05-formatting_plots.jmd +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Formatting Plots -author: Chris Rackauckas ---- - -Since the plotting functionality is implemented as a recipe to Plots.jl, [all of the options open to Plots.jl can be used in our plots](https://juliaplots.github.io/supported/). In addition, there are special features specifically for [differential equation plots](http://docs.juliadiffeq.org/dev/basics/plot.html). This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling `solve` on the problem, and `plot` on the solution: - -```julia -using DifferentialEquations, Plots, ParameterizedFunctions -gr() -lorenz = @ode_def Lorenz begin - dx = σ*(y-x) - dy = ρ*x-y-x*z - dz = x*y-β*z -end σ β ρ - -p = [10.0,8/3,28] -u0 = [1., 5., 10.] -tspan = (0., 100.) -prob = ODEProblem(lorenz, u0, tspan, p) -sol = solve(prob) -``` - -```julia -plot(sol) -``` - -Now let's change it to a phase plot. As discussed in the [plot functions page](http://docs.juliadiffeq.org/dev/basics/plot.html), we can use the `vars` command to choose the variables to plot. Let's plot variable `x` vs variable `y` vs variable `z`: - -```julia -plot(sol,vars=(1, 2, 3)) -``` - -We can also choose to plot the timeseries for a single variable: - -```julia -plot(sol,vars=[:x]) -``` - -Notice that we were able to use the variable names because we had defined the problem with the macro. But in general, we can use the indices. 
The previous plots would be: - -```julia -plot(sol,vars=(1,2,3)) -plot(sol,vars=[1]) -``` - -Common options are to add titles, axis, and labels. For example: - -```julia -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", -xaxis="Time (t)",yaxis="u(t) (in mm)",label=["X","Y","Z"]) -``` - -Notice that series recipes apply to the solution type as well. For example, we can use a scatter plot on the timeseries: - -```julia -scatter(sol,vars=[:x]) -``` - -This shows that the recipe is using the interpolation to smooth the plot. It becomes abundantly clear when we turn it off using `denseplot=false`: - -```julia -plot(sol,vars=(1,2,3),denseplot=false) -``` - -When this is done, only the values the timestep hits are plotted. Using the interpolation usually results in a much nicer looking plot so it's recommended, and since the interpolations have similar orders to the numerical methods, their results are trustworthy on the full interval. We can control the number of points used in the interpolation's plot using the `plotdensity` command: - -```julia -plot(sol,vars=(1,2,3),plotdensity=100) -``` - -That's plotting the entire solution using 100 points spaced evenly in time. - -```julia -plot(sol,vars=(1,2,3),plotdensity=10000) -``` - -That's more like it! By default it uses `100*length(sol)`, where the length is the number of internal steps it had to take. This heuristic usually does well, but unusually difficult equations it can be relaxed (since it will take small steps), and for equations with events / discontinuities raising the plot density can help resolve the discontinuity. - -Lastly notice that we can compose plots. Let's show where the 100 points are using a scatter plot: - -```julia -plot(sol,vars=(1,2,3)) -scatter!(sol,vars=(1,2,3),plotdensity=100) -``` - -We can instead work with an explicit plot object. This form can be better for building a complex plot in a loop. 
- -```julia -p = plot(sol,vars=(1,2,3)) -scatter!(p,sol,vars=(1,2,3),plotdensity=100) -title!("I added a title") -``` - -You can do all sorts of things. Have fun! - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/01-classical_physics.jmd b/tutorials/models/01-classical_physics.jmd deleted file mode 100644 index ea3842b7..00000000 --- a/tutorials/models/01-classical_physics.jmd +++ /dev/null @@ -1,342 +0,0 @@ ---- -title: Classical Physics Models -author: Yingbo Ma, Chris Rackauckas ---- - -If you're getting some cold feet to jump in to DiffEq land, here are some handcrafted differential equations mini problems to hold your hand along the beginning of your journey. - -## Radioactive Decay of Carbon-14 - -#### First order linear ODE - -$$f(t,u) = \frac{du}{dt}$$ - -The Radioactive decay problem is the first order linear ODE problem of an exponential with a negative coefficient, which represents the half-life of the process in question. Should the coefficient be positive, this would represent a population growth equation. - -```julia -using OrdinaryDiffEq, Plots -gr() - -#Half-life of Carbon-14 is 5,730 years. -C₁ = 5.730 - -#Setup -u₀ = 1.0 -tspan = (0.0, 1.0) - -#Define the problem -radioactivedecay(u,p,t) = -C₁*u - -#Pass to solver -prob = ODEProblem(radioactivedecay,u₀,tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Carbon-14 half-life", xaxis = "Time in thousands of years", yaxis = "Percentage left", label = "Numerical Solution") -plot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label="Analytical Solution") -``` - -## Simple Pendulum - -#### Second Order Linear ODE - -We will start by solving the pendulum problem. In the physics class, we often solve this problem by small angle approximation, i.e. $ sin(\theta) \approx \theta$, because otherwise, we get an elliptic integral which doesn't have an analytic solution. 
The linearized form is - -$$\ddot{\theta} + \frac{g}{L}{\theta} = 0$$ - -But we have numerical ODE solvers! Why not solve the *real* pendulum? - -$$\ddot{\theta} + \frac{g}{L}{\sin(\theta)} = 0$$ - -```julia -# Simple Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants -const g = 9.81 -L = 1.0 - -#Initial Conditions -u₀ = [0,π/2] -tspan = (0.0,6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum,u₀, tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Simple Pendulum Problem", xaxis = "Time", yaxis = "Height", label = ["Theta","dTheta"]) -``` - -So now we know the behaviour of the position versus time. However, it will be useful to us to look at the phase space of the pendulum, i.e., a representation of all possible states of the system in question (the pendulum) by looking at its velocity and position. Phase space analysis is ubiquitous in the analysis of dynamical systems, and thus we will provide a few facilities for it. - -```julia -p = plot(sol,vars = (1,2), xlims = (-9,9), title = "Phase Space Plot", xaxis = "Velocity", yaxis = "Position", leg=false) -function phase_plot(prob, u0, p, tspan=2pi) - _prob = ODEProblem(prob.f,u0,(0.0,tspan)) - sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy - plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing) -end -for i in -4pi:pi/2:4π - for j in -4pi:pi/2:4π - phase_plot(prob, [j,i], p) - end -end -plot(p,xlims = (-9,9)) -``` - -## Simple Harmonic Oscillator - -### Double Pendulum - -```julia -#Double Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants and setup -const m₁, m₂, L₁, L₂ = 1, 2, 1, 2 -initial = [0, π/3, 0, 3pi/5] -tspan = (0.,50.) 
- -#Convenience function for transforming from polar to Cartesian coordinates -function polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4)) - u = sol.t[1]:dt:sol.t[end] - - p1 = l1*map(x->x[vars[1]], sol.(u)) - p2 = l2*map(y->y[vars[2]], sol.(u)) - - x1 = l1*sin.(p1) - y1 = l1*-cos.(p1) - (u, (x1 + l2*sin.(p2), - y1 - l2*cos.(p2))) -end - -#Define the Problem -function double_pendulum(xdot,x,p,t) - xdot[1]=x[2] - xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2))) - xdot[3]=x[4] - xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2)) -end - -#Pass to Solvers -double_pendulum_problem = ODEProblem(double_pendulum, initial, tspan) -sol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05); -``` - -```julia -#Obtain coordinates in Cartesian Geometry -ts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01) -plot(ps...) -``` - -### Poincaré section - -The Poincaré section is a contour plot of a higher-dimensional phase space diagram. It helps to understand the dynamic interactions and is wonderfully pretty. - -The following equation came from [StackOverflow question](https://mathematica.stackexchange.com/questions/40122/help-to-plot-poincar%C3%A9-section-for-double-pendulum) - -$$\frac{d}{dt} - \begin{pmatrix} - \alpha \\ l_\alpha \\ \beta \\ l_\beta - \end{pmatrix}= - \begin{pmatrix} - 2\frac{l_\alpha - (1+\cos\beta)l_\beta}{3-\cos 2\beta} \\ - -2\sin\alpha - \sin(\alpha + \beta) \\ - 2\frac{-(1+\cos\beta)l_\alpha + (3+2\cos\beta)l_\beta}{3-\cos2\beta}\\ - -\sin(\alpha+\beta) - 2\sin(\beta)\frac{(l_\alpha-l_\beta)l_\beta}{3-\cos2\beta} + 2\sin(2\beta)\frac{l_\alpha^2-2(1+\cos\beta)l_\alpha l_\beta + (3+2\cos\beta)l_\beta^2}{(3-\cos2\beta)^2} - \end{pmatrix}$$ - -The Poincaré section here is the collection of $(β,l_β)$ when $α=0$ and $\frac{dα}{dt}>0$. 
- -#### Hamiltonian of a double pendulum -Now we will plot the Hamiltonian of a double pendulum - -```julia -#Constants and setup -using OrdinaryDiffEq -initial2 = [0.01, 0.005, 0.01, 0.01] -tspan2 = (0.,200.) - -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -# Construct a ContiunousCallback -condition(u,t,integrator) = u[1] -affect!(integrator) = nothing -cb = ContinuousCallback(condition,affect!,nothing, - save_positions = (true,false)) - -# Construct Problem -poincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2) -sol2 = solve(poincare, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - -function poincare_map(prob, u₀, p; callback=cb) - _prob = ODEProblem(prob.f,[0.01, 0.01, 0.01, u₀],prob.tspan) - sol = solve(_prob, Vern9(), save_everystep = false, callback=cb, abstol=1e-9) - scatter!(p, sol, vars=(3,4), markersize = 2) -end -``` - -```julia -p = scatter(sol2, vars=(3,4), leg=false, markersize = 2, ylims=(-0.01,0.03)) -for i in -0.01:0.00125:0.01 - poincare_map(poincare, i, p) -end -plot(p,ylims=(-0.01,0.03)) -``` - -## Hénon-Heiles System - -The Hénon-Heiles potential occurs when non-linear motion of a star around a galactic center with the motion restricted to a plane. 
- -$$ -\begin{align} -\frac{d^2x}{dt^2}&=-\frac{\partial V}{\partial x}\\ -\frac{d^2y}{dt^2}&=-\frac{\partial V}{\partial y} -\end{align} -$$ - -where - -$$V(x,y)={\frac {1}{2}}(x^{2}+y^{2})+\lambda \left(x^{2}y-{\frac {y^{3}}{3}}\right).$$ - -We pick $\lambda=1$ in this case, so - -$$V(x,y) = \frac{1}{2}(x^2+y^2+2x^2y-\frac{2}{3}y^3).$$ - -Then the total energy of the system can be expressed by - -$$E = T+V = V(x,y)+\frac{1}{2}(\dot{x}^2+\dot{y}^2).$$ - -The total energy should conserve as this system evolves. - -```julia -using OrdinaryDiffEq, Plots - -#Setup -initial = [0.,0.1,0.5,0] -tspan = (0,100.) - -#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will -#the total energy of the system. -V(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3) -E(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2); - -#Define the function -function Hénon_Heiles(du,u,p,t) - x = u[1] - y = u[2] - dx = u[3] - dy = u[4] - du[1] = dx - du[2] = dy - du[3] = -x - 2x*y - du[4] = y^2 - y -x^2 -end - -#Pass to solvers -prob = ODEProblem(Hénon_Heiles, initial, tspan) -sol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16); -``` - -```julia -# Plot the orbit -plot(sol, vars=(1,2), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) -``` - -```julia -#Optional Sanity check - what do you think this returns and why? -@show sol.retcode - -#Plot - -plot(sol, vars=(1,3), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol, vars=(2,4), leg = false) -``` - -```julia -#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector -#pass it to the plotter a bit more conveniently -energy = map(x->E(x...), sol.u) - -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. 
-@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -### Symplectic Integration - -To prevent energy drift, we can instead use a symplectic integrator. We can directly define and solve the `SecondOrderODEProblem`: - -```julia -function HH_acceleration!(dv,v,u,p,t) - x,y = u - dx,dy = dv - dv[1] = -x - 2x*y - dv[2] = y^2 - y -x^2 -end -initial_positions = [0.0,0.1] -initial_velocities = [0.5,0.0] -prob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan) -sol2 = solve(prob, KahanLi8(), dt=1/10); -``` - -Notice that we get the same results: - -```julia -# Plot the orbit -plot(sol2, vars=(3,4), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) -``` - -```julia -plot(sol2, vars=(3,1), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol2, vars=(4,2), leg = false) -``` - -but now the energy change is essentially zero: - -```julia -energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u) -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. -@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol2.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -It's so close to zero it breaks GR! And let's try to use a Runge-Kutta-Nyström solver to solve this. Note that Runge-Kutta-Nyström isn't symplectic. - -```julia -sol3 = solve(prob, DPRKN6()); -energy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u) -@show ΔE = energy[1]-energy[end] -gr() -plot(sol3.t, energy, title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -Note that we are using the `DPRKN6` sovler at `reltol=1e-3` (the default), yet it has a smaller energy variation than `Vern9` at `abs_tol=1e-16, rel_tol=1e-16`. 
Therefore, using specialized solvers to solve its particular problem is very efficient. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/02-conditional_dosing.jmd b/tutorials/models/02-conditional_dosing.jmd deleted file mode 100644 index 546c5577..00000000 --- a/tutorials/models/02-conditional_dosing.jmd +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: Conditional Dosing Pharmacometric Example -author: Chris Rackauckas ---- - -In this example we will show how to model a conditional dosing using the `DiscreteCallbacks`. The problem is as follows. The patient has a drug `A(t)` in their system. The concentration of the drug is given as `C(t)=A(t)/V` for some volume constant `V`. At `t=4`, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below `4`, then they will receive a new dose. - -For our model, we will use the simple decay equation. We will write this in the in-place form to make it easy to extend to more complicated examples: - -```julia -using DifferentialEquations -function f(du,u,p,t) - du[1] = -u[1] -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) -``` - -Let's see what the solution looks like without any events. - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -We see that at time `t=4`, the patient should receive a dose. Let's code up that event. We need to check at `t=4` if the concentration `u[1]/4` is `<4`, and if so, add `10` to `u[1]`. We do this with the following: - -```julia -condition(u,t,integrator) = t==4 && u[1]/V<4 -affect!(integrator) = integrator.u[1] += 10 -cb = DiscreteCallback(condition,affect!) 
-``` - -Now we will give this callback to the solver, and tell it to stop at `t=4` so that way the condition can be checked: - -```julia -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) -``` - -Let's show that it actually added 10 instead of setting the value to 10. We could have set the value using `affect!(integrator) = integrator.u[1] = 10` - -```julia -println(sol(4.00000)) -println(sol(4.000000000001)) -``` - -Now let's model a patient whose decay rate for the drug is lower: - -```julia -function f(du,u,p,t) - du[1] = -u[1]/6 -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) -``` - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -Under the same criteria, with the same event, this patient will not receive a second dose: - -```julia -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/03-diffeqbio_I_introduction.jmd b/tutorials/models/03-diffeqbio_I_introduction.jmd deleted file mode 100644 index e00d3b90..00000000 --- a/tutorials/models/03-diffeqbio_I_introduction.jmd +++ /dev/null @@ -1,267 +0,0 @@ ---- -title: "DiffEqBiological Tutorial I: Introduction" -author: Samuel Isaacson ---- - -DiffEqBiological.jl is a domain specific language (DSL) for writing chemical -reaction networks in Julia. The generated chemical reaction network model can -then be translated into a variety of mathematical models which can be solved -using components of the broader -[DifferentialEquations.jl](http://juliadiffeq.org/) ecosystem. - -In this tutorial we'll provide an introduction to using DiffEqBiological to -specify chemical reaction networks, and then to solve ODE, jump, tau-leaping and -SDE models generated from them. 
Let's start by using the DiffEqBiological -`reaction_network` macro to specify a simply chemical reaction network; the -well-known Repressilator. - -We first import the basic packages we'll need, and use Plots.jl for making -figures: - -```julia -# If not already installed, first hit "]" within a Julia REPL. Then type: -# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify - -using DifferentialEquations, DiffEqBiological, Plots, Latexify -pyplot(fmt=:svg); -``` - -We now construct the reaction network. The basic types of arrows and predefined -rate laws one can use are discussed in detail within the DiffEqBiological -[Chemical Reaction Models -documentation](http://docs.juliadiffeq.org/dev/models/biological.html). Here -we use a mix of first order, zero order and repressive Hill function rate laws. -Note, $\varnothing$ corresponds to the empty state, and is used for zeroth order -production and first order degradation reactions: - -```julia -repressilator = @reaction_network begin - hillr(P₃,α,K,n), ∅ --> m₁ - hillr(P₁,α,K,n), ∅ --> m₂ - hillr(P₂,α,K,n), ∅ --> m₃ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - (δ,γ), m₃ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - β, m₃ --> m₃ + P₃ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - μ, P₃ --> ∅ -end α K n δ γ β μ; -``` - -We can use Latexify to look at the corresponding reactions and understand the -generated rate laws for each reaction - -```julia; results="hidden"; -latexify(repressilator; env=:chemical) -``` -```julia; echo=false; skip="notebook"; -mathjax = WEAVE_ARGS[:doctype] == "pdf" ? 
false : true -x = latexify(repressilator; env=:chemical, starred=true, mathjax=mathjax); -display("text/latex", "$x"); -``` - -We can also use Latexify to look at the corresponding ODE model for the chemical -system - -```julia; results="hidden"; -latexify(repressilator, cdot=false) -``` -```julia; echo=false; skip="notebook"; -x = latexify(repressilator, cdot=false, starred=true); -display("text/latex", "$x"); -``` - -To solve the ODEs we need to specify the values of the parameters in the model, -the initial condition, and the time interval to solve the model on. To do this -it helps to know the orderings of the parameters and the species. Parameters are -ordered in the same order they appear after the `end` statement in the -`@reaction_network` macro. Species are ordered in the order they first appear -within the `@reaction_network` macro. We can see these orderings using the -`speciesmap` and `paramsmap` functions: - -```julia -speciesmap(repressilator) -``` - -```julia -paramsmap(repressilator) -``` - -## Solving the ODEs: -Knowing these orderings, we can create parameter and initial condition vectors, -and setup the `ODEProblem` we want to solve: - -```julia -# parameters [α,K,n,δ,γ,β,μ] -p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60) - -# initial condition [m₁,m₂,m₃,P₁,P₂,P₃] -u₀ = [0.,0.,0.,20.,0.,0.] - -# time interval to solve on -tspan = (0., 10000.) - -# create the ODEProblem we want to solve -oprob = ODEProblem(repressilator, u₀, tspan, p) -``` - -At this point we are all set to solve the ODEs. We can now use any ODE solver -from within the DiffEq package. We'll just use the default DifferentialEquations -solver for now, and then plot the solutions: - -```julia -sol = solve(oprob, saveat=10.) -plot(sol, fmt=:svg) -``` - -We see the well-known oscillatory behavior of the repressilator! For more on -choices of ODE solvers, see the JuliaDiffEq -[documentation](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html). 
- ---- - -## Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kinetics -Let's now look at a stochastic chemical kinetics model of the repressilator, -modeling it with jump processes. Here we will construct a DiffEqJump -`JumpProblem` that uses Gillespie's `Direct` method, and then solve it to -generate one realization of the jump process: - -```julia -# first we redefine the initial condition to be integer valued -u₀ = [0,0,0,20,0,0] - -# next we create a discrete problem to encode that our species are integer valued: -dprob = DiscreteProblem(repressilator, u₀, tspan, p) - -# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver: -jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false)) - -# now let's solve and plot the jump process: -sol = solve(jprob, SSAStepper(), saveat=10.) -plot(sol, fmt=:svg) -``` - -Here we see that oscillations remain, but become much noisier. Note, in -constructing the `JumpProblem` we could have used any of the SSAs that are part -of DiffEqJump instead of the `Direct` method, see the list of SSAs (i.e. -constant rate jump aggregators) in the -[documentation](http://docs.juliadiffeq.org/dev/types/jump_types.html#Constant-Rate-Jump-Aggregators-1). - ---- -## $\tau$-leaping Methods: -While SSAs generate exact realizations for stochastic chemical kinetics jump -process models, [$\tau$-leaping](https://en.wikipedia.org/wiki/Tau-leaping) -methods offer a performant alternative by discretizing in time the underlying -time-change representation of the stochastic process. The DiffEqJump package has -limited support for $\tau$-leaping methods in the form of the basic Euler's -method type approximation proposed by Gillespie. 
We can simulate a $\tau$-leap -approximation to the repressilator by using the `RegularJump` representation of -the network to construct a `JumpProblem`: - -```julia -rjs = regularjumps(repressilator) -lprob = JumpProblem(dprob, Direct(), rjs) -lsol = solve(lprob, SimpleTauLeaping(), dt=.1) -plot(lsol, plotdensity=1000, fmt=:svg) -``` - ---- -## Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models: -At an intermediary physical scale between macroscopic ODE models and microscopic -stochastic chemical kinetic models lies the CLE, a SDE version of the model. The -SDEs add to each ODE above a noise term. As the repressilator has species that -get very close to zero in size, it is not a good candidate to model with the CLE -(where solutions can then go negative and become unphysical). Let's create a -simpler reaction network for a birth-death process that will stay non-negative: - -```julia -bdp = @reaction_network begin - c₁, X --> 2X - c₂, X --> 0 - c₃, 0 --> X -end c₁ c₂ c₃ -p = (1.0,2.0,50.) -u₀ = [5.] -tspan = (0.,4.); -``` - -The corresponding Chemical Langevin Equation SDE is then - -```julia; results="hidden"; -latexify(bdp, noise=true, cdot=false) -``` -```julia; echo=false; skip="notebook"; -x = latexify(bdp, noise=true, cdot=false, starred=true); -display("text/latex", "$x"); -``` - -where each $W_i(t)$ denotes an independent Brownian Motion. We can solve the CLE -SDE model by creating an `SDEProblem` and solving it similar to what we did for -ODEs above: - -```julia -# SDEProblem for CLE -sprob = SDEProblem(bdp, u₀, tspan, p) - -# solve and plot, tstops is used to specify enough points -# that the plot looks well-resolved -sol = solve(sprob, tstops=range(0., step=4e-3, length=1001)) -plot(sol, fmt=:svg) -``` - -We again have complete freedom to select any of the -StochasticDifferentialEquations.jl SDE solvers, see the -[documentation](http://docs.juliadiffeq.org/dev/solvers/sde_solve.html). 
- ---- -## What information can be queried from the reaction_network: -The generated `reaction_network` contains a lot of basic information. For example -- `f=oderhsfun(repressilator)` is a function `f(du,u,p,t)` that given the current - state vector `u` and time `t` fills `du` with the time derivatives of `u` - (i.e. the right hand side of the ODEs). -- `jac=jacfun(repressilator)` is a function `jac(J,u,p,t)` that evaluates and - returns the Jacobian of the ODEs in `J`. A corresponding Jacobian matrix of - expressions can be accessed using the `jacobianexprs` function: -```julia; results="hidden"; -latexify(jacobianexprs(repressilator), cdot=false) -``` -```julia; echo=false; skip="notebook"; -x = latexify(jacobianexprs(repressilator), cdot=false, starred=true); -display("text/latex", "$x"); -``` -- `pjac = paramjacfun(repressilator)` is a function `pjac(pJ,u,p,t)` that - evaluates and returns the Jacobian, `pJ`, of the ODEs *with respect to the - parameters*. This allows `reaction_network`s to be used in the - DifferentialEquations.jl local sensitivity analysis package - [DiffEqSensitivity](http://docs.juliadiffeq.org/dev/analysis/sensitivity.html). - - -By default, generated `ODEProblems` will be passed the corresponding Jacobian -function, which will then be used within implicit ODE/SDE methods. - -The [DiffEqBiological API -documentation](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html) provides -a thorough description of the many query functions that are provided to access -network properties and generated functions. In DiffEqBiological Tutorial II -we'll explore the API. - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). 
-If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github -site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull -request to DiffEqTutorials updating the tutorial! - ---- -```julia; echo=false; skip="notebook" -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/04-diffeqbio_II_networkproperties.jmd b/tutorials/models/04-diffeqbio_II_networkproperties.jmd deleted file mode 100644 index a686fe96..00000000 --- a/tutorials/models/04-diffeqbio_II_networkproperties.jmd +++ /dev/null @@ -1,488 +0,0 @@ ---- -title: "DiffEqBiological Tutorial II: Network Properties API" -author: Samuel Isaacson ---- - -The [DiffEqBiological -API](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html) provides a -collection of functions for easily accessing network properties, and for -incrementally building and extending a network. In this tutorial we'll go -through the API, and then illustrate how to programmatically construct a -network. 
- -We'll illustrate the API using a toggle-switch like network that contains a -variety of different reaction types: - -```julia -using DifferentialEquations, DiffEqBiological, Latexify, Plots -fmt = :svg -pyplot(fmt=fmt) -rn = @reaction_network begin - hillr(D₂,α,K,n), ∅ --> m₁ - hillr(D₁,α,K,n), ∅ --> m₂ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - (k₊,k₋), 2P₁ ↔ D₁ - (k₊,k₋), 2P₂ ↔ D₂ - (k₊,k₋), P₁+P₂ ↔ T -end α K n δ γ β μ k₊ k₋; -``` - -This corresponds to the chemical reaction network given by - -```julia; results="hidden"; -latexify(rn; env=:chemical) -``` -```julia; echo=false; skip="notebook"; -x = latexify(rn; env=:chemical, starred=true, mathjax=true); -display("text/latex", "$x"); -``` - ---- -## Network Properties -[Basic -properties](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Basic-properties-1) -of the generated network include the `speciesmap` and `paramsmap` functions we -examined in the last tutorial, along with the corresponding `species` and -`params` functions: - -```julia -species(rn) -``` -```julia -params(rn) -``` - -The numbers of species, parameters and reactions can be accessed using -`numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`. - -A number of functions are available to access [properties of -reactions](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Reaction-Properties-1) -within the generated network, including `substrates`, `products`, `dependents`, -`ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`, -`productsymstoich`, and `netstoich`. Each of these functions takes two -arguments, the reaction network `rn` and the index of the reaction to query -information about. 
For example, to find the substrate symbols and their -corresponding stoichiometries for the 11th reaction, `2P₁ --> D₁`, we would use - -```julia -substratesymstoich(rn, 11) -``` - -Broadcasting works on all these functions, allowing the construction of a vector -holding the queried information across all reactions, i.e. - -```julia -substratesymstoich.(rn, 1:numreactions(rn)) -``` - -To see the net stoichiometries for all reactions we would use - -```julia -netstoich.(rn, 1:numreactions(rn)) -``` - -Here the first integer in each pair corresponds to the index of the species -(with symbol `species(rn)[index]`). The second integer corresponds to the net -stoichiometric coefficient of the species within the reaction. `substratestoich` -and `productstoich` are defined similarly. - -Several functions are also provided that calculate different types of -[dependency -graphs](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Dependency-Graphs-1). -These include `rxtospecies_depgraph`, which provides a mapping from reaction -index to the indices of species whose population changes when the reaction -occurs: - -```julia -rxtospecies_depgraph(rn) -``` - -Here the last row indicates that the species with indices `[3,4,7]` will change -values when the reaction `T --> P₁ + P₂` occurs. To confirm these are the -correct species we can look at - -```julia -species(rn)[[3,4,7]] -``` - -The `speciestorx_depgraph` similarly provides a mapping from species to reactions -for which their *rate laws* depend on that species. These correspond to all reactions -for which the given species is in the `dependent` set of the reaction. 
We can verify this -for the first species, `m₁`: - -```julia -speciestorx_depgraph(rn)[1] -``` -```julia -findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn))) -``` - -Finally, `rxtorx_depgraph` provides a mapping that shows when a given reaction -occurs, which other reactions have rate laws that involve species whose value -would have changed: - -```julia -rxtorx_depgraph(rn) -``` - -#### Note on Using Network Property API Functions -Many basic network query and reaction property functions are simply accessors, -returning information that is already stored within the generated -`reaction_network`. For these functions, modifying the returned data structures -may lead to inconsistent internal state within the network. As such, they should -be used for accessing, but not modifying, network properties. The [API -documentation](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html) -indicates which functions return newly allocated data structures and which -return data stored within the `reaction_network`. - ---- -## Incremental Construction of Networks -The `@reaction_network` macro is monolithic, in that it not only constructs and -stores basic network properties such as the reaction stoichiometries, but also -generates **everything** needed to immediately solve ODE, SDE and jump models -using the network. This includes Jacobian functions, noise functions, and jump -functions for each reaction. While this allows for a compact interface to the -DifferentialEquations.jl solvers, it can also be computationally expensive for -large networks, where a user may only wish to solve one type of problem and/or -have fine-grained control over what is generated. In addition, some types of -reaction network structures are more amenable to being constructed -programmatically, as opposed to writing out all reactions by hand within one -macro. 
For these reasons DiffEqBiological provides two additional macros that -only *initially* setup basic reaction network properties, and which can be -extended through a programmatic interface: `@min_reaction_network` and -`@empty_reaction_network`. We now give an introduction to constructing these -more minimal network representations, and how they can be programmatically -extended. See also the relevant [API -section](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1). - -The `@min_reaction_network` macro works identically to the `@reaction_network` -macro, but the generated network will only be complete with respect to its -representation of chemical network properties (i.e. species, parameters and -reactions). No ODE, SDE or jump models are generated during the macro call. It -can subsequently be extended with the addition of new species, parameters or -reactions. The `@empty_reaction_network` allocates an empty network structure -that can also be extended using the programmatic interface. For example, consider -a partial version of the toggle-switch like network we defined above: - -```julia -rnmin = @min_reaction_network begin - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ -end δ γ β μ; -``` - -Here we have left out the first two, and last three, reactions from the original -`reaction_network`. To expand the network until it is functionally equivalent to -the original model we add back in the missing species, parameters, and *finally* -the missing reactions. Note, it is required that species and parameters be -defined before any reactions using them are added. The necessary network -extension functions are given by `addspecies!`, `addparam!` and `addreaction!`, -and described in the -[API](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). 
To complete `rnmin` we first add the relevant -species: - -```julia -addspecies!(rnmin, :D₁) -addspecies!(rnmin, :D₂) -addspecies!(rnmin, :T) -``` - -Next we add the needed parameters - -```julia -addparam!(rnmin, :α) -addparam!(rnmin, :K) -addparam!(rnmin, :n) -addparam!(rnmin, :k₊) -addparam!(rnmin, :k₋) -``` - -Note, both `addspecies!` and `addparam!` also accept strings encoding the -variable names (which are then converted to `Symbol`s internally). - -We are now ready to add the missing reactions. The API provides two forms of the -`addreaction!` function, one takes expressions analogous to what one would write -in the macro: - -```julia -addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂)) -addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂)) -addreaction!(rnmin, :k₊, :(2P₁ --> D₁)) -addreaction!(rnmin, :k₋, :(D₁ --> 2P₁)) -``` - -The rate can be an expression or symbol as above, but can also just be a -numeric value. The second form of `addreaction!` takes tuples of -`Pair{Symbol,Int}` that encode the stoichiometric coefficients of substrates and -reactants: - -```julia -# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich) -addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,)) -addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,)) -addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1)) -``` - -Let's check that `rn` and `rnmin` have the same set of species: - -```julia -setdiff(species(rn), species(rnmin)) -``` - -the same set of params: - -```julia -setdiff(params(rn), params(rnmin)) -``` - -and the final reaction has the same substrates, reactions, and rate expression: - -```julia -rxidx = numreactions(rn) -setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx)) -``` -```julia -setdiff(products(rn, rxidx), products(rnmin, rxidx)) -``` -```julia -rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx) -``` - ---- -## Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps -Once a network generated from `@min_reaction_network` or 
-`@empty_reaction_network` has had all the associated species, parameters and -reactions filled in, corresponding ODE, SDE or jump models can be constructed. -The relevant API functions are `addodes!`, `addsdes!` and `addjumps!`. One -benefit to contructing models with these functions is that they offer more -fine-grained control over what actually gets constructed. For example, -`addodes!` has the optional keyword argument, `build_jac`, which if set to -`false` will disable construction of symbolic Jacobians and functions for -evaluating Jacobians. For large networks this can give a significant speed-up in -the time required for constructing an ODE model. Each function and its -associated keyword arguments are described in the API section, [Functions to add -ODEs, SDEs or Jumps to a -Network](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1). - -Let's extend `rnmin` to include the needed functions for use in ODE -solvers: - -```julia -addodes!(rnmin) -``` - -The [Generated Functions for -Models](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Generated-Functions-for-Models-1) -section of the API shows what functions have been generated. For ODEs these -include `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)` -which evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. For -each generated function, the corresponding expressions from which it was -generated can be retrieved using accessors from the [Generated -Expressions](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Generated-Expressions-1) -section of the API. The equations within `du` can be retrieved using the -`odeexprs(rnmin)` function. 
For example: - -```julia -odeexprs(rnmin) -``` - -Using Latexify we can see the ODEs themselves to compare with these expressions: - -```julia; results="hidden"; -latexify(rnmin) -``` -```julia; echo=false; skip="notebook"; -x = latexify(rnmin, starred=true); -display("text/latex", "$x"); -``` - -For ODEs two other functions are generated by `addodes!`. `jacfun(rnmin)` will -return the generated Jacobian evaluation function, `fjac(dJ,u,p,t)`, which given -the current solution `u` evaluates the Jacobian within `dJ`. -`jacobianexprs(rnmin)` gives the corresponding matrix of expressions, which can -be used with Latexify to see the Jacobian: - -```julia; results="hidden"; -latexify(jacobianexprs(rnmin)) -``` -```julia; echo=false; skip="notebook"; -x = latexify(jacobianexprs(rnmin), starred=true); -display("text/latex", "$x"); -``` - -`addodes!` also generates a function that evaluates the Jacobian of the ODE -derivative functions with respect to the parameters. `paramjacfun(rnmin)` then -returns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which -given the current solution `u` evaluates the Jacobian matrix with respect to -parameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an -[`ODEFunction`](http://docs.juliadiffeq.org/dev/features/performance_overloads.html) -representation of the ODEs is available from `odefun(rnmin)`. - -`addsdes!` and `addjumps!` work similarly to complete the network for use in -StochasticDiffEq and DiffEqJump solvers. - -#### Note on Using Generated Function and Expression API Functions -The generated functions and expressions accessible through the API require first -calling the appropriate `addodes!`, `addsdes` or `addjumps` function. These are -responsible for actually constructing the underlying functions and expressions. -The API accessors simply return already constructed functions and expressions -that are stored within the `reaction_network` structure. 
- ---- -## Example of Generating a Network Programmatically -For a user directly typing in a reaction network, it is generally easier to use -the `@min_reaction_network` or `@reaction_network` macros to fully specify -reactions. However, for large, structured networks it can be much easier to -generate the network programmatically. For very large networks, with tens of -thousands of reactions, the form of `addreaction!` that uses stoichiometric -coefficients should be preferred as it offers substantially better performance. -To put together everything we've seen, let's generate the network corresponding -to a 1D continuous time random walk, approximating the diffusion of molecules -within an interval. - -The basic "reaction" network we wish to study is - -$$ -u_1 \leftrightarrows u_2 \leftrightarrows u_3 \cdots \leftrightarrows u_{N} -$$ - -for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll -assume the rate molecules hop from their current site to any particular neighbor -is just $h^{-2}$. We can interpret this hopping process as a collection of -$2N-2$ "reactions", with the form $u_i \to u_j$ for $j=i+1$ or $j=i-1$. We construct -the corresponding reaction network as follows. 
First we set values for the basic -parameters: -```julia -N = 64 -h = 1 / N -``` - -then we create an empty network, and add each species - -```julia -rn = @empty_reaction_network - -for i = 1:N - addspecies!(rn, Symbol(:u, i)) -end -``` - -We next add one parameter `β`, which we will set equal to the hopping rate -of molecules, $h^{-2}$: - -```julia -addparam!(rn, :β) -``` - -Finally, we add in the $2N-2$ possible hopping reactions: -```julia -for i = 1:N - (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,)) - (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,)) -end -``` - -Let's first construct an ODE model for the network - -```julia -addodes!(rn) -``` - -We now need to specify the initial condition, parameter vector and time interval -to solve on. We start with 10000 molecules placed at the center of the domain, -and setup an `ODEProblem` to solve: - -```julia -u₀ = zeros(N) -u₀[div(N,2)] = 10000 -p = [1/(h*h)] -tspan = (0.,.01) -oprob = ODEProblem(rn, u₀, tspan, p) -``` - -We are now ready to solve the problem and plot the solution. Since we have -essentially generated a method of lines discretization of the diffusion equation -with a discontinuous initial condition, we'll use an A-L stable implicit ODE -solver, `Rodas5`, and plot the solution at a few times: - -```julia -sol = solve(oprob, Rodas5()) -times = [0., .0001, .001, .01] -plt = plot() -for time in times - plot!(plt, 1:N, sol(time), fmt=fmt, xlabel="i", ylabel="uᵢ", label=string("t = ", time), lw=3) -end -plot(plt, ylims=(0.,10000.)) -``` - -Here we see the characteristic diffusion of molecules from the center of the -domain, resulting in a shortening and widening of the solution as $t$ increases. - -Let's now look at a stochastic chemical kinetics jump process version of the -model, where β gives the probability per time each molecule can hop from its -current lattice site to an individual neighboring site. 
We first add in the -jumps, disabling `regular_jumps` since they are not needed, and using the -`minimal_jumps` flag to construct a minimal representation of the needed jumps. -We then construct a `JumpProblem`, and use the Composition-Rejection Direct -method, `DirectCR`, to simulate the process of the molecules hopping about on -the lattice: - -```julia -addjumps!(rn, build_regular_jumps=false, minimal_jumps=true) - -# make the initial condition integer valued -u₀ = zeros(Int, N) -u₀[div(N,2)] = 10000 - -# setup and solve the problem -dprob = DiscreteProblem(rn, u₀, tspan, p) -jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false)) -jsol = solve(jprob, SSAStepper(), saveat=times) -``` - -We can now plot bar graphs showing the locations of the molecules at the same -set of times we examined the ODE solution. For comparison, we also plot the -corresponding ODE solutions (red lines) that we found: -```julia -times = [0., .0001, .001, .01] -plts = [] -for i = 1:4 - b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i])) - plot!(b,sol(times[i])) - push!(plts,b) -end -plot(plts...) -``` - -Similar to the ODE solutions, we see that the molecules spread out and become -more and more well-mixed throughout the domain as $t$ increases. The simulation -results are noisy due to the finite numbers of molecules present in the -stochsatic simulation, but since the number of molecules is large they agree -well with the ODE solution at each time. - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). -If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). 
If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github -site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull -request to DiffEqTutorials updating the tutorial! - ---- -```julia; echo=false; skip="notebook" -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/04b-diffeqbio_III_steadystates.jmd b/tutorials/models/04b-diffeqbio_III_steadystates.jmd deleted file mode 100644 index 079644db..00000000 --- a/tutorials/models/04b-diffeqbio_III_steadystates.jmd +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: "DiffEqBiological Tutorial III: Steady-States and Bifurcations" -author: Torkel Loman and Samuel Isaacson ---- - -Several types of steady state analysis can be performed for networks defined -with DiffEqBiological by utilizing homotopy continuation. This allows for -finding the steady states and bifurcations within a large class of systems. In -this tutorial we'll go through several examples of using this functionality. - -We start by loading the necessary packages: -```julia -using DiffEqBiological, Plots -gr(); default(fmt = :png); -``` - -### Steady states and stability of a biochemical reaction network. -Bistable switches are well known biological motifs, characterised by the -presence of two different stable steady states. - -```julia -bistable_switch = @reaction_network begin - d, (X,Y) → ∅ - hillR(Y,v1,K1,n1), ∅ → X - hillR(X,v2,K2,n2), ∅ → Y -end d v1 K1 n1 v2 K2 n2 -d = 0.01; -v1 = 1.5; K1 = 30; n1 = 3; -v2 = 1.; K2 = 30; n2 = 3; -bistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2]; -``` - -The steady states can be found using the `steady_states` function (which takes a reaction network and a set of parameter values as input). The stability of these steady states can be found using the `stability` function. 
- -```julia -ss = steady_states(bistable_switch, bistable_switch_p) -``` - -```julia -stability(ss,bistable_switch, bistable_switch_p) -``` - -Since the equilibration methodology is based on homotopy continuation, it is not -able to handle systems with non-integer exponents, or non polynomial reaction -rates. Neither of the following two systems will work. - -This system contains a non-integer exponent: -```julia -rn1 = @reaction_network begin - p, ∅ → X - hill(X,v,K,n), X → ∅ -end p v K n -p1 = [1.,2.5,1.5,1.5] -steady_states(rn1,p1) -``` - -This system contains a logarithmic reaction rate: -```julia -rn2 = @reaction_network begin - p, ∅ → X - log(X), X → ∅ -end p -p2 = [1.] -steady_states(rn2,p2) -``` - -### Bifurcation diagrams for biochemical reaction networks -Bifurcation diagrams illustrate how the steady states of a system depend on one -or more parameters. They can be computed with the `bifurcations` function. It -takes the same arguments as `steady_states`, with the addition of the parameter -one wants to vary, and an interval over which to vary it: - -```julia -bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.)) -plot(bif,ylabel="[X]",label="") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -The values for the second variable in the system can also be displayed, by -giving that as an additional input to `plot` (it is the second argument, directly -after the bifurcation diagram object): - -```julia -plot(bif,2,ylabel="[Y]") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -The `plot` function also accepts all other arguments which the Plots.jl `plot` function accepts. - -```julia -bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.)) -plot(bif,linewidth=1.,title="A bifurcation diagram",ylabel="Steady State concentration") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -Certain parameters, like `n1`, cannot be sensibly varied over a continuous -interval. 
Instead, a discrete bifurcation diagram can be calculated with the -`bifurcation_grid` function. Instead of an interval, the last argument is a -range of numbers: - -```julia -bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.) -plot(bif) -scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -### Bifurcation diagrams over two dimensions -In addition to the bifurcation diagrams illustrated above, where only a single -variable is varied, it is also possible to investigate the steady state -properties of s system as two different parameters are varied. Due to the nature -of the underlying bifurcation algorithm it is not possible to continuously vary -both parameters. Instead, a set of discrete values are selected for the first -parameter, and a continuous interval for the second. Next, for each discrete -value of the first parameter, a normal bifurcation diagram is created over the -interval given for the second parameter. - -```julia -bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.)) -plot(bif) -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -In the single variable case we could use a `bifurcation_grid` to investigate the -behavior of a parameter which could only attain discrete values. In the same -way, if we are interested in two parameters, both of which require integer -values, we can use `bifrucation_grid_2d`. In our case, this is required if we -want to vary both the parameters `n1` and `n2`: - -```julia -bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.) -plot(bif) -scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -### The Brusselator -The Brusselator is a well know reaction network, which may or may not oscillate, -depending on parameter values. 
- -```julia -brusselator = @reaction_network begin - A, ∅ → X - 1, 2X + Y → 3X - B, X → Y - 1, X → ∅ -end A B; -A = 0.5; B = 4.; -brusselator_p = [A, B]; -``` - -The system has only one steady state, for $(X,Y)=(A,B/A)$ This fixed point -becomes unstable when $B > 1+A^2$, leading to oscillations. Bifurcation diagrams -can be used to determine the system's stability, and hence look for where oscillations might appear in the Brusselator: - -```julia -bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5)) -plot(bif,2) -plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) -``` - -Here red and yellow colors label unstable steady-states, while blue and cyan -label stable steady-states. (In addition, yellow and cyan correspond to points -where at least one eigenvalue of the Jacobian is imaginary, while red and blue -correspond to points with real-valued eigenvalues.) - -Given `A=0.5`, the point at which the system should become unstable is `B=1.25`. We can confirm this in the bifurcation diagram. - -We can also investigate the behavior when we vary both parameters of the system: - -```julia -bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0)) -plot(bif) -plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) -``` - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). -If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). 
If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github -site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull -request to DiffEqTutorials updating the tutorial! - ---- -```julia; echo=false; skip="notebook" -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/05-kepler_problem.jmd b/tutorials/models/05-kepler_problem.jmd deleted file mode 100644 index abcbc12d..00000000 --- a/tutorials/models/05-kepler_problem.jmd +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: Kepler Problem -author: Yingbo Ma, Chris Rackauckas ---- - -The Hamiltonian $\mathcal {H}$ and the angular momentum $L$ for the Kepler problem are - -$$\mathcal {H} = \frac{1}{2}(\dot{q}^2_1+\dot{q}^2_2)-\frac{1}{\sqrt{q^2_1+q^2_2}},\quad -L = q_1\dot{q_2} - \dot{q_1}q_2$$ - -Also, we know that - -$${\displaystyle {\frac {\mathrm {d} {\boldsymbol {p}}}{\mathrm {d} t}}=-{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {q}}}}\quad ,\quad {\frac {\mathrm {d} {\boldsymbol {q}}}{\mathrm {d} t}}=+{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {p}}}}}$$ - -```julia -using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr() -H(q,p) = norm(p)^2/2 - inv(norm(q)) -L(q,p) = q[1]*p[2] - p[1]*q[2] - -pdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q) -qdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p) - -initial_position = [.4, 0] -initial_velocity = [0., 2.] -initial_cond = (initial_position, initial_velocity) -initial_first_integrals = (H(initial_cond...), L(initial_cond...)) -tspan = (0,20.) -prob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan) -sol = solve(prob, KahanLi6(), dt=1//10); -``` - -Let's plot the orbit and check the energy and angular momentum variation. 
We know that energy and angular momentum should be constant, and they are also called first integrals. - -```julia -plot_orbit(sol) = plot(sol,vars=(3,4), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab="Angular momentum variation") -end -analysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L)) -``` - -```julia -analysis_plot(sol, H, L) -``` - -Let's try to use a Runge-Kutta-Nyström solver to solve this problem and check the first integrals' variation. - -```julia -sol2 = solve(prob, DPRKN6()) # dt is not necessary, because unlike symplectic - # integrators DPRKN6 is adaptive -@show sol2.u |> length -analysis_plot(sol2, H, L) -``` - -Let's then try to solve the same problem by the `ERKN4` solver, which is specialized for sinusoid-like periodic function - -```julia -sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic - # integrators ERKN4 is adaptive -@show sol3.u |> length -analysis_plot(sol3, H, L) -``` - -We can see that `ERKN4` does a bad job for this problem, because this problem is not sinusoid-like. - -One advantage of using `DynamicalODEProblem` is that it can implicitly convert the second order ODE problem to a *normal* system of first order ODEs, which is solvable for other ODE solvers. Let's use the `Tsit5` solver for the next example. - -```julia -sol4 = solve(prob, Tsit5()) -@show sol4.u |> length -analysis_plot(sol4, H, L) -``` - -#### Note - -There is drifting for all the solutions, and high order methods are drifting less because they are more accurate. - -### Conclusion - ---- - -Symplectic integrator does not conserve the energy completely at all time, but the energy can come back. 
In order to make sure that the energy fluctuation comes back eventually, symplectic integrator has to have a fixed time step. Despite the energy variation, symplectic integrator conserves the angular momentum perfectly. - -Both Runge-Kutta-Nyström and Runge-Kutta integrator do not conserve energy nor the angular momentum, and the first integrals do not tend to come back. An advantage Runge-Kutta-Nyström integrator over symplectic integrator is that RKN integrator can have adaptivity. An advantage Runge-Kutta-Nyström integrator over Runge-Kutta integrator is that RKN integrator has less function evaluation per step. The `ERKN4` solver works best for sinusoid-like solutions. - -## Manifold Projection - -In this example, we know that energy and angular momentum should be conserved. We can achieve this through mainfold projection. As the name implies, it is a procedure to project the ODE solution to a manifold. Let's start with a base case, where mainfold projection isn't being used. - -```julia -using DiffEqCallbacks - -plot_orbit2(sol) = plot(sol,vars=(1,2), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals2(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab="Angular momentum variation") -end - -analysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L)) - -function hamiltonian(du,u,params,t) - q, p = u[1:2], u[3:4] - qdot(@view(du[1:2]), p, q, params, t) - pdot(@view(du[3:4]), p, q, params, t) -end - -prob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan) -sol_ = solve(prob2, RK4(), dt=1//5, adaptive=false) -analysis_plot2(sol_, H, L) -``` - -There is a significant fluctuation in the first integrals, when there is no mainfold projection. 
- -```julia -function first_integrals_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) -end - -cb = ManifoldProjection(first_integrals_manifold) -sol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb) -analysis_plot2(sol5, H, L) -``` - -We can see that thanks to the manifold projection, the first integrals' variation is very small, although we are using `RK4` which is not symplectic. But wait, what if we only project to the energy conservation manifold? - -```julia -function energy_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -energy_cb = ManifoldProjection(energy_manifold) -sol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb) -analysis_plot2(sol6, H, L) -``` - -There is almost no energy variation but angular momentum varies quite bit. How about only project to the angular momentum conservation manifold? - -```julia -function angular_manifold(residual,u) - residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -angular_cb = ManifoldProjection(angular_manifold) -sol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb) -analysis_plot2(sol7, H, L) -``` - -Again, we see what we expect. 
- -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/06-pendulum_bayesian_inference.jmd b/tutorials/models/06-pendulum_bayesian_inference.jmd deleted file mode 100644 index 380bb816..00000000 --- a/tutorials/models/06-pendulum_bayesian_inference.jmd +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Bayesian Inference on a Pendulum using Turing.jl -author: Vaibhav Dixit ---- - -### Set up simple pendulum problem - -```julia -using DiffEqBayes, OrdinaryDiffEq, RecursiveArrayTools, Distributions, Plots, StatsPlots -``` - -Let's define our simple pendulum problem. Here our pendulum has a drag term `ω` -and a length `L`. - -![pendulum](https://user-images.githubusercontent.com/1814174/59942945-059c1680-942f-11e9-991c-2025e6e4ccd3.jpg) - -We get first order equations by defining the first term as the velocity and the -second term as the position, getting: - -```julia -function pendulum(du,u,p,t) - ω,L = p - x,y = u - du[1] = y - du[2] = - ω*y -(9.8/L)*sin(x) -end - -u0 = [1.0,0.1] -tspan = (0.0,10.0) -prob1 = ODEProblem(pendulum,u0,tspan,[1.0,2.5]) -``` - -### Solve the model and plot - -To understand the model and generate data, let's solve and visualize the solution -with the known parameters: - -```julia -sol = solve(prob1,Tsit5()) -plot(sol) -``` - -It's the pendulum, so you know what it looks like. It's periodic, but since we -have not made a small angle assumption it's not exactly `sin` or `cos`. Because -the true dampening parameter `ω` is 1, the solution does not decay over time, -nor does it increase. The length `L` determines the period. 
- -### Create some dummy data to use for estimation - -We now generate some dummy data to use for estimation - -```julia -t = collect(range(1,stop=10,length=10)) -randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)]) -data = convert(Array,randomized) -``` - -Let's see what our data looks like on top of the real solution - -```julia -scatter!(data') -``` - -This data captures the non-dampening effect and the true period, making it -perfect to attempting a Bayesian inference. - -### Perform Bayesian Estimation - -Now let's fit the pendulum to the data. Since we know our model is correct, -this should give us back the parameters that we used to generate the data! -Define priors on our parameters. In this case, let's assume we don't have much -information, but have a prior belief that ω is between 0.1 and 3.0, while the -length of the pendulum L is probably around 3.0: - -```julia -priors = [Uniform(0.1,3.0), Normal(3.0,1.0)] -``` - -Finally let's run the estimation routine from DiffEqBayes.jl using the Turing.jl backend - -```julia -bayesian_result = turing_inference(prob1,Tsit5(),t,data,priors;num_samples=10_000, - syms = [:omega,:L]) -``` - -Notice that while our guesses had the wrong means, the learned parameters converged -to the correct means, meaning that it learned good posterior distributions for the -parameters. To look at these posterior distributions on the parameters, we can -examine the chains: - -```julia -plot(bayesian_result) -``` - -As a diagnostic, we will also check the parameter chains. The chain is the MCMC -sampling process. The chain should explore parameter space and converge reasonably -well, and we should be taking a lot of samples after it converges (it is these -samples that form the posterior distribution!) 
- -```julia -plot(bayesian_result, colordim = :parameter) -``` - -Notice that after awhile these chains converge to a "fuzzy line", meaning it -found the area with the most likelihood and then starts to sample around there, -which builds a posterior distribution around the true mean. diff --git a/tutorials/models/07-outer_solar_system.jmd b/tutorials/models/07-outer_solar_system.jmd deleted file mode 100644 index e31fa728..00000000 --- a/tutorials/models/07-outer_solar_system.jmd +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: The Outer Solar System -author: Yingbo Ma, Chris Rackauckas ---- - -## Data - -The chosen units are: masses relative to the sun, so that the sun has mass $1$. We have taken $m_0 = 1.00000597682$ to take account of the inner planets. Distances are in astronomical units , times in earth days, and the gravitational constant is thus $G = 2.95912208286 \cdot 10^{-4}$. - -| planet | mass | initial position | initial velocity | -| --- | --- | --- | --- | -| Jupiter | $m_1 = 0.000954786104043$ |
  • -3.5023653
  • -3.8169847
  • -1.5507963
|
  • 0.00565429
  • -0.00412490
  • -0.00190589
-| Saturn | $m_2 = 0.000285583733151$ |
  • 9.0755314
  • -3.0458353
  • -1.6483708
|
  • 0.00168318
  • 0.00483525
  • 0.00192462
-| Uranus | $m_3 = 0.0000437273164546$ |
  • 8.3101420
  • -16.2901086
  • -7.2521278
|
  • 0.00354178
  • 0.00137102
  • 0.00055029
-| Neptune | $m_4 = 0.0000517759138449$ |
  • 11.4707666
  • -25.7294829
  • -10.8169456
|
  • 0.00288930
  • 0.00114527
  • 0.00039677
-| Pluto | $ m_5 = 1/(1.3 \cdot 10^8 )$ |
  • -15.5387357
  • -25.2225594
  • -3.1902382
|
  • 0.00276725
  • -0.00170702
  • -0.00136504
- -The data is taken from the book "Geometric Numerical Integration" by E. Hairer, C. Lubich and G. Wanner. - -```julia -using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools -gr() - -G = 2.95912208286e-4 -M = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8] -planets = ["Sun", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"] - -pos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357] -pos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594] -pos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382] -pos = ArrayPartition(pos_x,pos_y,pos_z) - -vel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725] -vel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702] -vel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504] -vel = ArrayPartition(vel_x,vel_y,vel_z) - -tspan = (0.,200_000) -``` - -The N-body problem's Hamiltonian is - -$$H(p,q) = \frac{1}{2}\sum_{i=0}^{N}\frac{p_{i}^{T}p_{i}}{m_{i}} - G\sum_{i=1}^{N}\sum_{j=0}^{i-1}\frac{m_{i}m_{j}}{\left\lVert q_{i}-q_{j} \right\rVert}$$ - -Here, we want to solve for the motion of the five outer planets relative to the sun, namely, Jupiter, Saturn, Uranus, Neptune and Pluto. - -```julia -const ∑ = sum -const N = 6 -potential(p, t, x, y, z, M) = -G*∑(i->∑(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N) -``` - -## Hamiltonian System - -`NBodyProblem` constructs a second order ODE problem under the hood. We know that a Hamiltonian system has the form of - -$$\dot{p} = -H_{q}(p,q)\quad \dot{q}=H_{p}(p,q)$$ - -For an N-body system, we can symplify this as: - -$$\dot{p} = -\nabla{V}(q)\quad \dot{q}=M^{-1}p.$$ - -Thus $\dot{q}$ is defined by the masses. We only need to define $\dot{p}$, and this is done internally by taking the gradient of $V$. Therefore, we only need to pass the potential function and the rest is taken care of. 
- -```julia -nprob = NBodyProblem(potential, M, pos, vel, tspan) -sol = solve(nprob,Yoshida6(), dt=100); -``` - -```julia -orbitplot(sol,body_names=planets) -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/01-ModelingToolkit.jmd b/tutorials/ode_extras/01-ModelingToolkit.jmd deleted file mode 100644 index d872ae13..00000000 --- a/tutorials/ode_extras/01-ModelingToolkit.jmd +++ /dev/null @@ -1,270 +0,0 @@ ---- -title: ModelingToolkit.jl, An IR and Compiler for Scientific Models -author: Chris Rackauckas ---- - -A lot of people are building modeling languages for their specific domains. However, while the syntax my vary greatly between these domain-specific languages (DSLs), the internals of modeling frameworks are surprisingly similar: building differential equations, calculating Jacobians, etc. - -#### ModelingToolkit.jl is metamodeling systemitized - -After building our third modeling interface, we realized that this problem can be better approached by having a reusable internal structure which DSLs can target. This internal is ModelingToolkit.jl: an Intermediate Representation (IR) with a well-defined interface for defining system transformations and compiling to Julia functions for use in numerical libraries. Now a DSL can easily be written by simply defining the translation to ModelingToolkit.jl's primatives and querying for the mathematical quantities one needs. - -### Basic usage: defining differential equation systems, with performance! - -Let's explore the IR itself. ModelingToolkit.jl is friendly to use, and can used as a symbolic DSL in its own right. 
Let's define and solve the Lorenz differential equation system using ModelingToolkit to generate the functions: - -```{julia;line_width = 130} -using ModelingToolkit - -### Define a differential equation system - -@parameters t σ ρ β -@variables x(t) y(t) z(t) -@derivatives D'~t - -eqs = [D(x) ~ σ*(y-x), - D(y) ~ x*(ρ-z)-y, - D(z) ~ x*y - β*z] -de = ODESystem(eqs) -ode_f = ODEFunction(de, [x,y,z], [σ,ρ,β]) - -### Use in DifferentialEquations.jl - -using OrdinaryDiffEq -u₀ = ones(3) -tspan = (0.0,100.0) -p = [10.0,28.0,10/3] -prob = ODEProblem(ode_f,u₀,tspan,p) -sol = solve(prob,Tsit5()) - -using Plots -plot(sol,vars=(1,2,3)) -``` - -### ModelingToolkit is a compiler for mathematical systems - -At its core, ModelingToolkit is a compiler. It's IR is its type system, and its output are Julia functions (it's a compiler for Julia code to Julia code, written in Julia). - -DifferentialEquations.jl wants a function `f(du,u,p,t)` for defining an ODE system, which is what ModelingToolkit.jl is building. - -```{julia;line_width = 130} -generate_function(de, [x,y,z], [σ,ρ,β]) -``` - -A special syntax in DifferentialEquations.jl for small static ODE systems uses `f(u,p,t)`, which can be generated as well: - -```{julia;line_width = 130} -generate_function(de, [x,y,z], [σ,ρ,β]; version=ModelingToolkit.SArrayFunction) -``` - -ModelingToolkit.jl can be used to calculate the Jacobian of the differential equation system: - -```{julia;line_width = 130} -jac = calculate_jacobian(de) -``` - -It will automatically generate functions for using this Jacobian within the stiff ODE solvers for faster solving: - -```{julia;line_width = 130} -jac_expr = generate_jacobian(de) -``` - -It can even do fancy linear algebra. Stiff ODE solvers need to perform an LU-factorization which is their most expensive part. 
But ModelingToolkit.jl can skip this operation and instead generate the analytical solution to a matrix factorization, and build a Julia function for directly computing the factorization, which is then optimized in LLVM compiler passes. - -```{julia;line_width = 130} -ModelingToolkit.generate_factorized_W(de)[1] -``` - -### Solving Nonlinear systems - -ModelingToolkit.jl is not just for differential equations. It can be used for any mathematical target that is representable by its IR. For example, let's solve a rootfinding problem `F(x)=0`. What we do is define a nonlinear system and generate a function for use in NLsolve.jl - -```{julia;line_width = 130} -@variables x y z -@parameters σ ρ β - -# Define a nonlinear system -eqs = [0 ~ σ*(y-x), - 0 ~ x*(ρ-z)-y, - 0 ~ x*y - β*z] -ns = NonlinearSystem(eqs, [x,y,z]) -nlsys_func = generate_function(ns, [x,y,z], [σ,ρ,β]) -``` - -We can then tell ModelingToolkit.jl to compile this function for use in NLsolve.jl, and then numerically solve the rootfinding problem: - -```{julia;line_width = 130} -nl_f = @eval eval(nlsys_func) -# Make a closure over the parameters for for NLsolve.jl -f2 = (du,u) -> nl_f(du,u,(10.0,26.0,2.33)) - -using NLsolve -nlsolve(f2,ones(3)) -``` - -### Library of transformations on mathematical systems - -The reason for using ModelingToolkit is not just for defining performant Julia functions for solving systems, but also for performing mathematical transformations which may be required in order to numerically solve the system. For example, let's solve a third order ODE. The way this is done is by transforming the third order ODE into a first order ODE, and then solving the resulting ODE. This transformation is given by the `ode_order_lowering` function. 
- -```{julia;line_width = 130} -@derivatives D3'''~t -@derivatives D2''~t -@variables u(t), x(t) -eqs = [D3(u) ~ 2(D2(u)) + D(u) + D(x) + 1 - D2(x) ~ D(x) + 2] -de = ODESystem(eqs) -de1 = ode_order_lowering(de) -``` - -```{julia;line_width = 130} -de1.eqs -``` - -This has generated a system of 5 first order ODE systems which can now be used in the ODE solvers. - -### Linear Algebra... for free? - -Let's take a look at how to extend ModelingToolkit.jl in new directions. Let's define a Jacobian just by using the derivative primatives by hand: - -```{julia;line_width = 130} -@parameters t σ ρ β -@variables x(t) y(t) z(t) -@derivatives D'~t Dx'~x Dy'~y Dz'~z -eqs = [D(x) ~ σ*(y-x), - D(y) ~ x*(ρ-z)-y, - D(z) ~ x*y - β*z] -J = [Dx(eqs[1].rhs) Dy(eqs[1].rhs) Dz(eqs[1].rhs) - Dx(eqs[2].rhs) Dy(eqs[2].rhs) Dz(eqs[2].rhs) - Dx(eqs[3].rhs) Dy(eqs[3].rhs) Dz(eqs[3].rhs)] -``` - -Notice that this writes the derivatives in a "lazy" manner. If we want to actually compute the derivatives, we can expand out those expressions: - -```{julia;line_width = 130} -J = expand_derivatives.(J) -``` - -Here's the magic of ModelingToolkit.jl: **Julia treats ModelingToolkit expressions like a Number, and so generic numerical functions are directly usable on ModelingToolkit expressions!** Let's compute the LU-factorization of this Jacobian we defined using Julia's Base linear algebra library. - -```{julia;line_width = 130} -using LinearAlgebra -luJ = lu(J) -``` - -```{julia;line_width = 130} -luJ.L -``` - -and the inverse? - -```{julia;line_width = 130} -invJ = inv(J) -``` - -#### Thus ModelingToolkit.jl can utilize existing numerical code on symbolic codes - -Let's follow this thread a little deeper. 
- -### Automatically convert numerical codes to symbolic - -Let's take someone's code written to numerically solve the Lorenz equation: - -```{julia;line_width = 130} -function lorenz(du,u,p,t) - du[1] = p[1]*(u[2]-u[1]) - du[2] = u[1]*(p[2]-u[3]) - u[2] - du[3] = u[1]*u[2] - p[3]*u[3] -end -``` - -Since ModelingToolkit can trace generic numerical functions in Julia, let's trace it with Operations. When we do this, it'll spit out a symbolic representation of their numerical code: - -```{julia;line_width = 130} -u = [x,y,z] -du = similar(u) -p = [σ,ρ,β] -lorenz(du,u,p,t) -du -``` - -We can then perform symbolic manipulations on their numerical code, and build a new numerical code that optimizes/fixes their original function! - -```{julia;line_width = 130} -J = [Dx(du[1]) Dy(du[1]) Dz(du[1]) - Dx(du[2]) Dy(du[2]) Dz(du[2]) - Dx(du[3]) Dy(du[3]) Dz(du[3])] -J = expand_derivatives.(J) -``` - -### Automated Sparsity Detection - -In many cases one has to speed up large modeling frameworks by taking into account sparsity. While ModelingToolkit.jl can be used to compute Jacobians, we can write a standard Julia function in order to get a spase matrix of expressions which automatically detects and utilizes the sparsity of their function. - -```{julia;line_width = 130} -using SparseArrays -function SparseArrays.SparseMatrixCSC(M::Matrix{T}) where {T<:ModelingToolkit.Expression} - idxs = findall(!iszero, M) - I = [i[1] for i in idxs] - J = [i[2] for i in idxs] - V = [M[i] for i in idxs] - return SparseArrays.sparse_IJ_sorted!(I, J, V, size(M)...) -end -sJ = SparseMatrixCSC(J) -``` - -### Dependent Variables, Functions, Chain Rule - -"Variables" are overloaded. When you are solving a differential equation, the variable `u(t)` is actually a function of time. In order to handle these kinds of variables in a mathematically correct and extensible manner, the ModelingToolkit IR actually treats variables as functions, and constant variables are simply 0-ary functions (`t()`). 
- -We can utilize this idea to have parameters that are also functions. For example, we can have a parameter σ which acts as a function of 1 argument, and then utilize this function within our differential equations: - -```{julia;line_width = 130} -@parameters σ(..) -eqs = [D(x) ~ σ(t-1)*(y-x), - D(y) ~ x*(σ(t^2)-z)-y, - D(z) ~ x*y - β*z] -``` - -Notice that when we calculate the derivative with respect to `t`, the chain rule is automatically handled: - -```{julia;line_width = 130} -@derivatives Dₜ'~t -Dₜ(x*(σ(t^2)-z)-y) -expand_derivatives(Dₜ(x*(σ(t^2)-z)-y)) -``` - -### Hackability: Extend directly from the language - -ModelingToolkit.jl is written in Julia, and thus it can be directly extended from Julia itself. Let's define a normal Julia function and call it with a variable: - -```{julia;line_width = 130} -_f(x) = 2x + x^2 -_f(x) -``` - -Recall that when we do that, it will automatically trace this function and then build a symbolic expression. But what if we wanted our function to be a primative in the symbolic framework? This can be done by registering the function. 
- -```{julia;line_width = 130} -f(x) = 2x + x^2 -@register f(x) -``` - -Now this function is a new primitive: - -```{julia;line_width = 130} -f(x) -``` - -and we can now define derivatives of our function: - -```{julia;line_width = 130} -function ModelingToolkit.derivative(::typeof(f), args::NTuple{1,Any}, ::Val{1}) - 2 + 2args[1] -end -expand_derivatives(Dx(f(x))) -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/02-feagin.jmd b/tutorials/ode_extras/02-feagin.jmd deleted file mode 100644 index 155c94d2..00000000 --- a/tutorials/ode_extras/02-feagin.jmd +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Feagin's Order 10, 12, and 14 Methods -author: Chris Rackauckas ---- - -DifferentialEquations.jl includes Feagin's explicit Runge-Kutta methods of orders 10/8, 12/10, and 14/12. These methods have such high order that it's pretty much required that one uses numbers with more precision than Float64. As a prerequisite reference on how to use arbitrary number systems (including higher precision) in the numerical solvers, please see the Solving Equations in With Chosen Number Types notebook. - -## Investigation of the Method's Error - -We can use Feagin's order 16 method as follows. Let's use a two-dimensional linear ODE. Like in the Solving Equations in With Chosen Number Types notebook, we change the initial condition to BigFloats to tell the solver to use BigFloat types. 
- -```julia -using DifferentialEquations -const linear_bigα = big(1.01) -f(u,p,t) = (linear_bigα*u) - -# Add analytical solution so that errors are checked -f_analytic(u0,p,t) = u0*exp(linear_bigα*t) -ff = ODEFunction(f,analytic=f_analytic) -prob = ODEProblem(ff,big(0.5),(0.0,1.0)) -sol = solve(prob,Feagin14(),dt=1//16,adaptive=false); -``` - -```julia -println(sol.errors) -``` - -Compare that to machine $\epsilon$ for Float64: - -```julia -eps(Float64) -``` - -The error for Feagin's method when the stepsize is 1/16 is 8 orders of magnitude below machine $\epsilon$! However, that is dependent on the stepsize. If we instead use adaptive timestepping with the default tolerances, we get - -```julia -sol =solve(prob,Feagin14()); -println(sol.errors); print("The length was $(length(sol))") -``` - -Notice that when the stepsize is much higher, the error goes up quickly as well. These super high order methods are best when used to gain really accurate approximations (using still modest timesteps). Some examples of where such precision is necessary is astrodynamics where the many-body problem is highly chaotic and thus sensitive to small errors. - -## Convergence Test - -The Order 14 method is awesome, but we need to make sure it's really that awesome. The following convergence test is used in the package tests in order to make sure the implementation is correct. Note that all methods have such tests in place. - -```julia -using DiffEqDevTools -dts = 1.0 ./ 2.0 .^(10:-1:4) -sim = test_convergence(dts,prob,Feagin14()) -``` - -For a view of what's going on, let's plot the simulation results. - -```julia -using Plots -gr() -plot(sim) -``` - -This is a clear trend indicating that the convergence is truly Order 14, which -is the estimated slope. 
- -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/03-ode_minmax.jmd b/tutorials/ode_extras/03-ode_minmax.jmd deleted file mode 100644 index e88b8f9d..00000000 --- a/tutorials/ode_extras/03-ode_minmax.jmd +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Finding Maxima and Minima of DiffEq Solutions -author: Chris Rackauckas ---- - -### Setup - -In this tutorial we will show how to use Optim.jl to find the maxima and minima of solutions. Let's take a look at the double pendulum: - -```julia -#Constants and setup -using OrdinaryDiffEq -initial = [0.01, 0.01, 0.01, 0.01] -tspan = (0.,100.) - -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -#Pass to solvers -poincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan) -``` - -```julia -sol = solve(poincare, Tsit5()) -``` - -In time, the solution looks like: - -```julia -using Plots; gr() -plot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000) -``` - -while it has the well-known phase-space plot: - -```julia -plot(sol, vars=(3,4), leg=false) -``` - -### Local Optimization - -Let's fine out what some of the local maxima and minima are. Optim.jl can be used to minimize functions, and the solution type has a continuous interpolation which can be used. Let's look for the local optima for the 4th variable around `t=20`. Thus our optimization function is: - -```julia -f = (t) -> sol(t,idxs=4) -``` - -`first(t)` is the same as `t[1]` which transforms the array of size 1 into a number. 
`idxs=4` is the same as `sol(first(t))[4]` but does the calculation without a temporary array and thus is faster. To find a local minima, we can simply call Optim on this function. Let's find a local minimum: - -```julia -using Optim -opt = optimize(f,18.0,22.0) -``` - -From this printout we see that the minimum is at `t=18.63` and the value is `-2.79e-2`. We can get these in code-form via: - -```julia -println(opt.minimizer) -println(opt.minimum) -``` - -To get the maximum, we just minimize the negative of the function: - -```julia -f = (t) -> -sol(first(t),idxs=4) -opt2 = optimize(f,0.0,22.0) -``` - -Let's add the maxima and minima to the plots: - -```julia -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([opt.minimizer],[opt.minimum],label="Local Min") -scatter!([opt2.minimizer],[-opt2.minimum],label="Local Max") -``` - -Brent's method will locally minimize over the full interval. If we instead want a local maxima nearest to a point, we can use `BFGS()`. In this case, we need to optimize a vector `[t]`, and thus dereference it to a number using `first(t)`. - -```julia -f = (t) -> -sol(first(t),idxs=4) -opt = optimize(f,[20.0],BFGS()) -``` - -### Global Optimization - -If we instead want to find global maxima and minima, we need to look somewhere else. For this there are many choices. A pure Julia option is BlackBoxOptim.jl, but I will use NLopt.jl. 
Following the NLopt.jl tutorial but replacing their function with out own: - -```julia -import NLopt, ForwardDiff - -count = 0 # keep track of # function evaluations - -function g(t::Vector, grad::Vector) - if length(grad) > 0 - #use ForwardDiff for the gradients - grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t) - end - sol(first(t),idxs=4) -end -opt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1) -NLopt.lower_bounds!(opt, [0.0]) -NLopt.upper_bounds!(opt, [40.0]) -NLopt.xtol_rel!(opt,1e-8) -NLopt.min_objective!(opt, g) -(minf,minx,ret) = NLopt.optimize(opt,[20.0]) -println(minf," ",minx," ",ret) -NLopt.max_objective!(opt, g) -(maxf,maxx,ret) = NLopt.optimize(opt,[20.0]) -println(maxf," ",maxx," ",ret) -``` - -```julia -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([minx],[minf],label="Global Min") -scatter!([maxx],[maxf],label="Global Max") -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/04-monte_carlo_parameter_estim.jmd b/tutorials/ode_extras/04-monte_carlo_parameter_estim.jmd deleted file mode 100644 index dbcd243a..00000000 --- a/tutorials/ode_extras/04-monte_carlo_parameter_estim.jmd +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Monte Carlo Parameter Estimation From Data -author: Chris Rackauckas ---- - -First you want to create a problem which solves multiple problems at the same time. This is the Monte Carlo Problem. When the parameter estimation tools say it will take any DEProblem, it really means ANY DEProblem! - -So, let's get a Monte Carlo problem setup that solves with 10 different initial conditions. 
- -```julia -using DifferentialEquations, DiffEqParamEstim, Plots, Optim - -# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions - -# Set up Lotka-Volterra system -function pf_func(du,u,p,t) - du[1] = p[1] * u[1] - p[2] * u[1]*u[2] - du[2] = -3 * u[2] + u[1]*u[2] -end -p = [1.5,1.0] -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p) -``` - -Now for a MonteCarloProblem we have to take this problem and tell it what to do N times via the prob_func. So let's generate N=10 different initial conditions, and tell it to run the same problem but with these 10 different initial conditions each time: - -```julia -# Setting up to solve the problem N times (for the N different initial conditions) -N = 10; -initial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]] -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -``` - -We can check this does what we want by solving it: - -```julia -# Check above does what we want -sim = solve(monte_prob,Tsit5(),num_monte=N) -plot(sim) -``` - -num_monte=N means "run N times", and each time it runs the problem returned by the prob_func, which is always the same problem but with the ith initial condition. - -Now let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array. - -```julia -# Generate a dataset from these runs -data_times = 0.0:0.1:10.0 -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -data = Array(sim) -``` - -Here, data[i,j,k] is the same as sim[i,j,k] which is the same as sim[k][i,j] (where sim[k] is the kth solution). So data[i,j,k] is the jth timepoint of the ith variable in the kth trajectory. - -Now let's build a loss function. A loss function is some loss(sol) that spits out a scalar for how far from optimal we are. 
In the documentation I show that we normally do loss = L2Loss(t,data), but we can bootstrap off of this. Instead lets build an array of N loss functions, each one with the correct piece of data. - -```julia -# Building a loss function -losses = [L2Loss(data_times,data[:,:,i]) for i in 1:N] -``` - -So losses[i] is a function which computes the loss of a solution against the data of the ith trajectory. So to build our true loss function, we sum the losses: - -```julia -loss(sim) = sum(losses[i](sim[i]) for i in 1:N) -``` - -As a double check, make sure that loss(sim) outputs zero (since we generated the data from sim). Now we generate data with other parameters: - -```julia -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8]) -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -loss(sim) -``` - -and get a non-zero loss. So we now have our problem, our data, and our loss function... we have what we need. - -Put this into build_loss_objective. - -```julia -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - saveat=data_times) -``` - -Notice that I added the kwargs for solve into this. They get passed to an internal solve command, so then the loss is computed on N trajectories at data_times. - -Thus we take this objective function over to any optimization package. I like to do quick things in Optim.jl. Here, since the Lotka-Volterra equation requires positive parameters, I use Fminbox to make sure the parameters stay positive. I start the optimization with [1.3,0.9], and Optim spits out that the true parameters are: - -```julia -lower = zeros(2) -upper = fill(2.0,2) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) -``` - -```julia -result -``` - -Optim finds one but not the other parameter. 
- -I would run a test on synthetic data for your problem before using it on real data. Maybe play around with different optimization packages, or add regularization. You may also want to decrease the tolerance of the ODE solvers via - -```julia -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - abstol=1e-8,reltol=1e-8, - saveat=data_times) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) -``` - -```julia -result -``` - -if you suspect error is the problem. However, if you're having problems it's most likely not the ODE solver tolerance and mostly because parameter inference is a very hard optimization problem. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/test.jmd b/tutorials/test.jmd deleted file mode 100644 index a17a9e18..00000000 --- a/tutorials/test.jmd +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Test -author: Chris Rackauckas ---- - -This is a test of the builder system. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/01-number_types.jmd b/tutorials/type_handling/01-number_types.jmd deleted file mode 100644 index f7c6ff07..00000000 --- a/tutorials/type_handling/01-number_types.jmd +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Solving Equations in With Julia-Defined Types -author: Chris Rackauckas ---- - -One of the nice things about DifferentialEquations.jl is that it is designed with Julia's type system in mind. What this means is, if you have properly defined a Number type, you can use this number type in DifferentialEquations.jl's algorithms! [Note that this is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as ODE.jl, Sundials.jl, and ODEInterface.jl are not compatible with some number systems.] 
- -DifferentialEquations.jl determines the numbers to use in its solvers via the types that are designated by `tspan` and the initial condition of the problem. It will keep the time values in the same type as tspan, and the solution values in the same type as the initial condition. [Note that adaptive timestepping requires that the time type is compaible with `sqrt` and `^` functions. Thus dt cannot be Integer or numbers like that if adaptive timestepping is chosen]. - -Let's solve the linear ODE first define an easy way to get ODEProblems for the linear ODE: - -```julia -using DifferentialEquations -f = (u,p,t) -> (p*u) -prob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01); -``` - -First let's solve it using Float64s. To do so, we just need to set u0 to a Float64 (which is done by the default) and dt should be a float as well. - -```julia -prob = prob_ode_linear -sol =solve(prob,Tsit5()) -println(sol) -``` - -Notice that both the times and the solutions were saved as Float64. Let's change the time to use rational values. Rationals are not compatible with adaptive time stepping since they do not have an L2 norm (this can be worked around by defining `internalnorm`, but rationals already explode in size!). To account for this, let's turn off adaptivity as well: - -```julia -prob = ODEProblem(f,1/2,(0//1,1//1),101//100); -sol = solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol) -``` - -Now let's do something fun. Let's change the solution to use `Rational{BigInt}` and print out the value at the end of the simulation. To do so, simply change the definition of the initial condition. - -```julia -prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100); -sol =solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol[end]) -``` - -That's one huge fraction! 
- -## Other Compatible Number Types - -#### BigFloats - -```julia -prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01)) -sol =solve(prob_ode_biglinear,Tsit5()) -println(sol[end]) -``` - -#### DoubleFloats.jl - -There's are Float128-like types. Higher precision, but fixed and faster than arbitrary precision. - -```julia -using DoubleFloats -prob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01)) -sol =solve(prob_ode_doublelinear,Tsit5()) -println(sol[end]) -``` - -#### ArbFloats - -These high precision numbers which are much faster than Bigs for less than 500-800 bits of accuracy. - -```julia -using ArbNumerics -prob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01)) -sol =solve(prob_ode_arbfloatlinear,Tsit5()) -println(sol[end]) -``` - -## Incompatible Number Systems - -#### DecFP.jl - -Next let's try DecFP. DecFP is a fixed-precision decimals library which is made to give both performance but known decimals of accuracy. Having already installed DecFP with `]add DecFP`, I can run the following: - -```julia -using DecFP -prob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01)) -sol =solve(prob_ode_decfplinear,Tsit5()) -println(sol[end]); println(typeof(sol[end])) -``` - -#### Decimals.jl - -Install with `]add Decimals`. - -```julia -using Decimals -prob_ode_decimallinear = ODEProblem(f,[decimal("1.0")]./[decimal("2.0")],(0//1,1//1),decimal(1.01)) -sol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails -println(sol[end]); println(typeof(sol[end])) -``` - -At the time of writing this, Decimals are not compatible. This is not on DifferentialEquations.jl's end, it's on partly on Decimal's end since it is not a subtype of Number. 
Thus it's not recommended you use Decimals with DifferentialEquations.jl - -## Conclusion - -As you can see, DifferentialEquations.jl can use arbitrary Julia-defined number systems in its arithmetic. If you need 128-bit floats, i.e. a bit more precision but not arbitrary, DoubleFloats.jl is a very good choice! For arbitrary precision, ArbNumerics are the most feature-complete and give great performance compared to BigFloats, and thus I recommend their use when high-precision (less than 512-800 bits) is required. DecFP is a great library for high-performance decimal numbers and works well as well. Other number systems could use some modernization. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/02-uncertainties.jmd b/tutorials/type_handling/02-uncertainties.jmd deleted file mode 100644 index 3583f0f4..00000000 --- a/tutorials/type_handling/02-uncertainties.jmd +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Numbers with Uncertainties -author: Mosè Giordano, Chris Rackauckas ---- - -The result of a measurement should be given as a number with an attached uncertainties, besides the physical unit, and all operations performed involving the result of the measurement should propagate the uncertainty, taking care of correlation between quantities. - -There is a Julia package for dealing with numbers with uncertainties: [`Measurements.jl`](https://github.com/JuliaPhysics/Measurements.jl). Thanks to Julia's features, `DifferentialEquations.jl` easily works together with `Measurements.jl` out-of-the-box. - -This notebook will cover some of the examples from the tutorial about classical Physics. 
- -## Caveat about `Measurement` type - -Before going on with the tutorial, we must point up a subtlety of `Measurements.jl` that you should be aware of: - -```julia -using Measurements - -5.23 ± 0.14 === 5.23 ± 0.14 -``` - -```julia -(5.23± 0.14) - (5.23 ± 0.14) -``` - -```julia -(5.23 ± 0.14) / (5.23 ± 0.14) -``` - -The two numbers above, even though have the same nominal value and the same uncertainties, are actually two different measurements that only by chance share the same figures and their difference and their ratio have a non-zero uncertainty. It is common in physics to get very similar, or even equal, results for a repeated measurement, but the two measurements are not the same thing. - -Instead, if you have *one measurement* and want to perform some operations involving it, you have to assign it to a variable: - -```julia -x = 5.23 ± 0.14 -x === x -``` - -```julia -x - x -``` - -```julia -x / x -``` - -## Radioactive Decay of Carbon-14 - -The rate of decay of carbon-14 is governed by a first order linear ordinary differential equation - -$$\frac{\mathrm{d}u(t)}{\mathrm{d}t} = -\frac{u(t)}{\tau}$$ - -where $\tau$ is the mean lifetime of carbon-14, which is related to the half-life $t_{1/2} = (5730 \pm 40)$ years by the relation $\tau = t_{1/2}/\ln(2)$. - -```julia -using DifferentialEquations, Measurements, Plots - -# Half-life and mean lifetime of radiocarbon, in years -t_12 = 5730 ± 40 -τ = t_12 / log(2) - -#Setup -u₀ = 1 ± 0 -tspan = (0.0, 10000.0) - -#Define the problem -radioactivedecay(u,p,t) = - u / τ - -#Pass to solver -prob = ODEProblem(radioactivedecay, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-8) - -# Analytic solution -u = exp.(- sol.t / τ) - -plot(sol.t, sol.u, label = "Numerical", xlabel = "Years", ylabel = "Fraction of Carbon-14") -plot!(sol.t, u, label = "Analytic") -``` - -The two curves are perfectly superimposed, indicating that the numerical solution matches the analytic one. 
We can check that also the uncertainties are correctly propagated in the numerical solution: - -```julia -println("Quantity of carbon-14 after ", sol.t[11], " years:") -println("Numerical: ", sol[11]) -println("Analytic: ", u[11]) -``` - -Both the value of the numerical solution and its uncertainty match the analytic solution within the requested tolerance. We can also note that close to 5730 years after the beginning of the decay (half-life of the radioisotope), the fraction of carbon-14 that survived is about 0.5. - -## Simple pendulum - -### Small angles approximation - -The next problem we are going to study is the simple pendulum in the approximation of small angles. We address this simplified case because there exists an easy analytic solution to compare. - -The differential equation we want to solve is - -$$\ddot{\theta} + \frac{g}{L} \theta = 0$$ - -where $g = (9.79 \pm 0.02)~\mathrm{m}/\mathrm{s}^2$ is the gravitational acceleration measured where the experiment is carried out, and $L = (1.00 \pm 0.01)~\mathrm{m}$ is the length of the pendulum. - -When you set up the problem for `DifferentialEquations.jl` remember to define the measurements as variables, as seen above. - -```julia -using DifferentialEquations, Measurements, Plots - -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*θ -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -# Analytic solution -u = u₀[2] .* cos.(sqrt(g / L) .* sol.t) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -plot!(sol.t, u, label = "Analytic") -``` - -Also in this case there is a perfect superimposition between the two curves, including their uncertainties. 
- -We can also have a look at the difference between the two solutions: - -```julia -plot(sol.t, getindex.(sol.u, 2) .- u, label = "") -``` - -## Arbitrary amplitude - -Now that we know how to solve differential equations involving numbers with uncertainties we can solve the simple pendulum problem without any approximation. This time the differential equation to solve is the following: - -$$\ddot{\theta} + \frac{g}{L} \sin(\theta) = 0$$ - -```julia -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L) * sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -``` - -We note that in this case the period of the oscillations is not constant. - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/03-unitful.jmd b/tutorials/type_handling/03-unitful.jmd deleted file mode 100644 index 17105b7c..00000000 --- a/tutorials/type_handling/03-unitful.jmd +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Unit Checked Arithmetic via Unitful.jl -author: Chris Rackauckas ---- - -Units and dimensional analysis are standard tools across the sciences for checking the correctness of your equation. However, most ODE solvers only allow for the equation to be in dimensionless form, leaving it up to the user to both convert the equation to a dimensionless form, punch in the equations, and hopefully not make an error along the way. - -DifferentialEquations.jl allows for one to use Unitful.jl to have unit-checked arithmetic natively in the solvers. 
Given the dispatch implementation of the Unitful, this has little overhead. - -## Using Unitful - -To use Unitful, you need to have the package installed. Then you can add units to your variables. For example: - -```julia; wrap=false -using Unitful -t = 1.0u"s" -``` - -Notice that `t` is a variable with units in seconds. If we make another value with seconds, they can add - -```julia; wrap=false -t2 = 1.02u"s" -t+t2 -``` - -and they can multiply: - -```julia; wrap=false -t*t2 -``` - -You can even do rational roots: - -```julia; wrap=false -sqrt(t) -``` - -Many operations work. These operations will check to make sure units are correct, and will throw an error for incorrect operations: - -```julia; wrap=false -t + sqrt(t) -``` - -## Using Unitful with DifferentialEquations.jl - -Just like with other number systems, you can choose the units for your numbers by simply specifying the units of the initial condition and the timestep. For example, to solve the linear ODE where the variable has units of Newton's and `t` is in Seconds, we would use: - -```julia; wrap=false -using DifferentialEquations -f = (y,p,t) -> 0.5*y -u0 = 1.5u"N" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) -``` - -Notice that we recieved a unit mismatch error. This is correctly so! Remember that for an ODE: - -$$\frac{dy}{dt} = f(t,y)$$ - -we must have that `f` is a rate, i.e. `f` is a change in `y` per unit time. So we need to fix the units of `f` in our example to be `N/s`. Notice that we then do not receive an error if we do the following: - -```julia; wrap=false -f = (y,p,t) -> 0.5*y/3.0u"s" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) -``` - -This gives a a normal solution object. 
Notice that the values are all with the correct units: - -```julia; wrap=false -print(sol[:]) -``` - -We can plot the solution by removing the units: - -```julia; wrap=false -using Plots -gr() -plot(ustrip(sol.t),ustrip(sol[:]),lw=3) -``` - -```{julia; echo=false; skip="notebook"} -using DiffEqTutorials -DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/weave_tutorials.jl b/weave_tutorials.jl new file mode 100644 index 00000000..a6052620 --- /dev/null +++ b/weave_tutorials.jl @@ -0,0 +1,16 @@ +using SciMLTutorials +target = ARGS[1] +if isdir(target) + if !isfile(joinpath(target, "Project.toml")) + error("Cannot weave folder $(target) without Project.toml!") + end + println("Weaving the $(target) folder") + SciMLTutorials.weave_folder(target) +elseif isfile(target) + folder = dirname(target)[11:end] # remove the tutorials/ + file = basename(target) + println("Weaving $(folder)/$(file)") + SciMLTutorials.weave_file(folder, file) +else + error("Unable to find weaving target $(target)!") +end