diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml new file mode 100644 index 00000000..3494a9f1 --- /dev/null +++ b/.JuliaFormatter.toml @@ -0,0 +1,3 @@ +style = "sciml" +format_markdown = true +format_docstrings = true diff --git a/.buildkite/.gitignore b/.buildkite/.gitignore new file mode 100644 index 00000000..46de5d5e --- /dev/null +++ b/.buildkite/.gitignore @@ -0,0 +1 @@ +ssh_deploy.key diff --git a/.buildkite/0_webui.yml b/.buildkite/0_webui.yml new file mode 100644 index 00000000..af44a7d7 --- /dev/null +++ b/.buildkite/0_webui.yml @@ -0,0 +1,28 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":unlock: Launch tutorials build if hash check successful" + branches: "!gh-pages" + plugins: + - staticfloat/cryptic#v2: + signed_pipelines: + - pipeline: .buildkite/launch_tutorials.yml + signature_file: .buildkite/launch_tutorials.yml.signature + inputs: + - .buildkite/run_tutorial.yml + - .buildkite/publish_tutorials_output.sh + allow_hash_override: true + command: "true" + + - label: ":runner: Dynamically launch test suite" + plugins: + - staticfloat/forerunner: + # This will create one job overall, throwing all path information away + watch: + - "src/**/*.jl" + - "src/*.jl" + - "**/*.toml" + target: .buildkite/test_sciml.yml + target_type: simple diff --git a/.buildkite/cryptic_repo_keys/.gitignore b/.buildkite/cryptic_repo_keys/.gitignore new file mode 100644 index 00000000..f84d0896 --- /dev/null +++ b/.buildkite/cryptic_repo_keys/.gitignore @@ -0,0 +1,7 @@ + +# Ignore the unencrypted repo_key +repo_key + +# Ignore any agent keys (public or private) we have stored +agent_key* + diff --git a/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 b/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 new file mode 100644 index 00000000..6065ce29 Binary files /dev/null and b/.buildkite/cryptic_repo_keys/repo_key.2297e5e7 differ diff --git a/.buildkite/launch_test_sciml.yml b/.buildkite/launch_test_sciml.yml new file mode 100644 index 
00000000..85079b4a --- /dev/null +++ b/.buildkite/launch_test_sciml.yml @@ -0,0 +1,16 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":runner: Dynamically launch test_sciml" + branches: "!gh-pages" + plugins: + - staticfloat/forerunner: + # This will create one job overall, throwing all path information away + watch: + - "src/**/*.jl" + - "src/*.jl" + - "**/*.toml" + target: .buildkite/test_sciml.yml + target_type: simple diff --git a/.buildkite/launch_tutorials.yml b/.buildkite/launch_tutorials.yml new file mode 100644 index 00000000..81eebe74 --- /dev/null +++ b/.buildkite/launch_tutorials.yml @@ -0,0 +1,19 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + +steps: + - label: ":runner: Dynamically launch run_tutorial.yml" + branches: "!gh-pages" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} + depends_on: + plugins: + - staticfloat/forerunner: + # This will create one job per project + watch: + - tutorials/**/*.jmd + - tutorials/**/*.toml + path_processor: .buildkite/path_processors/project-coalescing + target: .buildkite/run_tutorial.yml + target_type: template \ No newline at end of file diff --git a/.buildkite/launch_tutorials.yml.signature b/.buildkite/launch_tutorials.yml.signature new file mode 100644 index 00000000..8f286f74 --- /dev/null +++ b/.buildkite/launch_tutorials.yml.signature @@ -0,0 +1,2 @@ +Salted__ +HX+D;HN2qhb=c$J0~0~dх3A܉YrB{?󒟭z P \ No newline at end of file diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml deleted file mode 100644 index fb7ed9f7..00000000 --- a/.buildkite/pipeline.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - label: ":runner: Dynamically launch Pipelines" - plugins: - - staticfloat/forerunner#554da94e67f34d42728dfdec5c69b88b6f6240db: - # This will create one job per project - watch: - - tutorials/**/*.jmd - - tutorials/**/*.toml - path_processor: .buildkite/path_processors/project-coalescing - 
target: .buildkite/run_tutorial.yml - target_type: template - - staticfloat/forerunner#554da94e67f34d42728dfdec5c69b88b6f6240db: - # This will create one job overall, throwing all path information away - watch: - - src/**/*.jl - - "**/*.toml" - target: .buildkite/test_sciml.yml - agents: - queue: "juliacpu" - fastcpu: true diff --git a/.buildkite/publish_tutorials_output.sh b/.buildkite/publish_tutorials_output.sh index fe8f50a7..c4b0535f 100755 --- a/.buildkite/publish_tutorials_output.sh +++ b/.buildkite/publish_tutorials_output.sh @@ -1,18 +1,23 @@ #!/bin/bash # Ensure that our git wants to talk to github without prompting +mkdir -p ~/.ssh ssh-keyscan github.com >> ~/.ssh/known_hosts +git config --global user.email "buildkite@julialang.org" +git config --global user.name "SciML Tutorials CI" # Clone SciMLTutorialsOutput to temporary directory temp_dir=$(mktemp -d) git -C "${temp_dir}" clone git@github.com:SciML/SciMLTutorialsOutput . # Copy our output artifacts into it: -for d in html markdown notebook pdf script; do +for d in docs html notebook pdf script markdown; do cp -vRa "${d}/" "${temp_dir}" done +cp -va *.md *.bib "${temp_dir}" # Commit the result up to output +set -e git -C "${temp_dir}" add . git -C "${temp_dir}" commit -m "Automatic build\nPublished by build of: ${BUILDKITE_REPO%.git}/commit/${BUILDKITE_COMMIT}" git -C "${temp_dir}" push diff --git a/.buildkite/run_tutorial.yml b/.buildkite/run_tutorial.yml index f3d0a1ed..1bf05b99 100644 --- a/.buildkite/run_tutorial.yml +++ b/.buildkite/run_tutorial.yml @@ -1,24 +1,32 @@ +agents: + queue: "juliaecosystem" + sandbox.jl: true + arch: "x86_64" + # This is a pipeline that weaves a tutorial, then uploads the resultant # .PDF and other reports as (buildkite, not Julia) artifacts. The `coppermind` # configuration memoizes the result, so that identical inputs don't get -# weavd multiple times. 
- -env: - # Encrypted credentials to allow uploading to our S3 bucket that coppermind caches to - SECRET_BUILDKITE_S3_ACCESS_KEY_ID: "WTTuQqw0HjdHmvlwRzeQyFz/3QOl2lVn9ZhAr+zGGh3JBNIlMKWiXQlIZdvh5d0BOIXH+hNi7t62nfltP6rZwuaPKK4G1erIkwFsr4s9H/UQLmyZRrYZsNzaXxYm34mhQe9pEvK7Ewv3RI3wEPZSs4vOXFfTMlPMllMHPOAJCTg3OJuuhLD/qMmBjmAPClpE/ZKzwhzdRvB3kRGt63NRHDia5170gNOpBYcpbt7dDonkeWtn5Ri0YFSGpfTPdMONamLq5zKLqNJcmbbU5B1lFJXpm+msckPCfI4iuY0kF27JoWDALU+X6ZoSDcdLc/qsSYvVEC5HXjJARPOevofkjA==;U2FsdGVkX18msUyQ0k4CYDnHKj7/SzHYIVaSkPu68Lw0Nf0Pg+rjiig35VTXQo1u" - SECRET_BUILDKITE_S3_SECRET_ACCESS_KEY: "PZ/yImpWOe8AXhkHVd/AQWZv6YpNxEkJIklEVXBhOZG/MbFI75R7bNAGFgkjfB3ataZiZbeQnQha0CWnrk/62lQlnfDo7q+71Cg1SheCZxw+pHAumDUUqd6OEvHbs9wf66PNNB/lAPBZZrb/v+0E3LKxoe+Bf7WAz7DiXLsB9jdbvely3CX/xIgzUI4B5Vsg5rQ5H9UnYFoEHgiZNhUGdD+plZnJ2qNN8x4OZIMEwyBNLwpmkAmRMxjQzD3IoyGFcHuOhEr4igZ1D+XtQ5sTfW4KTKjewn7mHUldSGiBsgEhpmy6kpodsIDhhC8Ittd/DbfPS3cQknCvfj++fU/WvQ==;U2FsdGVkX19e3zOwkCXS7ceL8n+wE8Jy0ywueT/N/lu5BL0XSEB3OoseNjzLwR6iSQrx//e5N+TkVRcOIDTW3Q==" - BUILDKITE_S3_DEFAULT_REGION: "us-east-1" - - # Encrypted SSH deploy key that allows us to deploy to SciMLTutorialsOutput - SECRET_SSH_DEPLOY_KEY: 
"UGz8Ovo5mXevfgi3KkF8PUUP3hKru98t9tUUvPDHawqIJdlEu2/6LmPo+nN1DFMUn8ahnxkxNy7QA6wJMgi19kD5QHAxkYjEY+xtWJw9su04ZPBiIsUWcOb+sEaTyhV3iibQBXbTmsbiWdLuvX2p9JN3ho8GoFHIODYY7BvBcXrpYedbylIaAqL1XfUmcmKk/mOoIPiRyrM+CFcGYj+fZ67mZNJuuAY+10WSbANwOfg/AsEQEVvgWU5H0KjLI0VOKvKjSzU4dlabI7o2LAf6YE6EFLpokqwwlGQpYAPDXhts5YlOFgJBol3wwbgAjaCvIFlb0/JD06Zy3inA55jqPw==;U2FsdGVkX18SNZN1oR4Eda9vCUJ7p0cclSy31PbAbX69DjrMHkRvShjVCOf0OZPqv9L3mfvMW0PCg2CzDIJhGBZPD9PtlffC5D6h8v37zZ/J/9Gl3UAoCfJlfCfFym0ks8esZrrXRnzVFO3fdAeTNwBTyM7rethc7G15vQrkTdHBzLvDldLcC7U9j11FHXJa/fhdso9j7z/TY5MdUxAq99aT/9M1G8K+7X7lEBW1gT2bHiPOb35O8wZM/Dvs9SZUzGALxe77dKsQNN5CncMuniOTQBhl7DuNAHrk6kFyyHKvXBBEp5YOAAP7h9wqeWXTbtqcji0b/onhFEzI27+A078veWyYAiuLFj/ktB8yG8A2ACzoY3ogQo7KNFnFzGTGtlJ12W4HVuffrN1+/x5Wnkf+4Wyr3Gk7jCY0G4umUtJQlNSh9leUmpzcOnv/tLIvhuXR4YS/8DiQAFsBVvR+AFs3jLIePCyw76nmc1goYc1FnzteUdnGtCtRCqOQtFU5MtHHUU/1j3S+wVV/Bo8GrzZsvLa9qIFrzg7WLbBEO1Zjd6bbpjBFh/LrdpiWuHgF/ZIAzStM7/mx7kLdeAe/hEfyqtSmFvTkk5wUiwE0BhzS05nZmLgiP4mrRxSbkc/mOV3ZzSUsTm9yD6ilnHhKMw0FbxA0z6o2ubazc2dG/wFJ5h6ILzCnlBOBFsG49U2jq/Sztq21i+xXIVGy1la9W7Ll7wHpnz4lft01iht/tjKd8axypTWC//RE3YBjM0cheZVu7eJgQTjpEalm4MFjtYBestLpJUVPoF0alyKMdO5+vxjb6h2cuJ4WyPJqdOpYjX7Ji7Go+9T+ztqPK8sg5o1axccPXJBWpMSKCXD+g4uZa2HdEnefMt8YjSEWI16qb74rM40ckirMHOONpLeKQmLzZ55p5AgcXK/IWNcn7t56uceEEI1PHMmlCm4MCEiw6NypsJEsAcubANIDdaJRHTRd2d6NOw9JDC+qnd9meT15wrxtU5L594GDe1/wZATI9rkRnYFOytHBpVmGyC5VPho6svF2xx5biU7Lv0Z/p6Pe4+0xLB+XfTceAR//VikbzSXQRtbWB5BGM/6apB01xPnp6XB8sFDT6hTo4Wz9YlYtpte6WNOMgDri6u7DGQVHjG2oMSrMoXnujE1XCnL1MTmqgTCmRzbxpF5nDi0/ECdWPtQdUwRGfzuLqohn3dpqvhhrtLfuIL9yWrXjmJoM1VZ8wTNdjX475g21NErqhkFmKl3B3s/5tgEP2MOWsdGLePODrDBCaHKsLhsl8xhM32KQLC6x2TLeAaoLbX40OC7mje60uThmACu6+wOmQ3PTQgErs5kOrtzrRfucSqLpRQ9bTEVn/g3G8mGMTx1bALbp2zEgIgdg2+8YqWq+lgJk8rbwNPBSnbP/r20SZHWtuS9nFyiKwEA80QuqgGTkew7lmW1jXG9FQkJbAJWlxsOFEH6L0SFniXj9j3tzYHe9ERpcg6BQDfjIl8nnjRI4Ha70vPLcLCNjeFtwJD23isqN5VAJTcpFGvnzITLFFhQbE4QPRKieuCnHtaCCDsQkfuYytf7IO8Tkc+dH/jIt+kru7gvdYXMEpE3eymL1JFWuziOvMo1Z4LQFJYW448SFgyK/9UQZ+zCVZL
+oxUtvF3TNT9SH3iv0qR8PE30U9/tHWoplNjOJOydxRfTEKXPyVCOg31b1qtYR/DshwsumfEca1qSkDFRbLxIQE5o/qbXvmxQg8YfTgSzj2sZWk/qbzTL20kYbqYLObJu8nzJUaEQNvupZLqypxFd7GiiFoQWzmoVApl+A7VHuDGiu1iWpNOuOKCUqZM19A93P0qZGUKCt2G5F1VqAbDY285Uf2+r+cxv48y7/7TcGPt05Fwsbyl4hV4ZXbwgvZI2+F/N+0+YEzbXTTUFoYGoOKuPjWO+JQNGB/FIXIs7p6Fuks6U++kj+O5WFJR8RY5A2E8AEm2uG5Ypm1Y51G+c6ruysR/buMY+aYhpvCLyrcJP3zTqQukoP9iysm8BCxJEpn4xthaNiVzSjfoXwR2Hvlwk7z6S6n6ZqY19HZeG0bcDnYJ5NNPAXwLa+PHaOR6CP4m1Q8Y8VPJtn9v3CSRJU3GFfH5HxPixdlc0zG55uKyLyB4ojF4Qt9V6a7FSDhcItJkHp1opR52HyMRLqbPW3SNpwblpqaY8a83ijlSBrN7ZquJADuZzJ+G1S47hObvNGN0I+ARELLzuQlP8mNk8EKfyqwIMqOpvnRepQeSwk89Tw5BbCTc/PS2fbanrEsLRiGt9lk6hAWBLj+5wCLuPnDaE0D/VM49B4BiPGGleWwXMSLOJscqLJeH0TlV//HOjfRs0aLxwpKzXdvvzFBXyUe9Gz1R7edKN5ObHAfS89beIIu+T5rgdi1sShYU8dvZfNBxTmiqImCoA1vMJMt0qe+ERn8IVdw5XJ0Lhpg446xfCYxuyR9HS4d/a4mobUEYg5+2Mn+PxJGRxVahxdR3us8iWY8LkPpXCjDb8Cbjt1rAScdOybnpdQjfGfAxtpDbE0IyeauSqFKNifz9Et52MbgwlyEfYwKdAqDbbigRBQfQCykrGSvlOVos2PTL1RjLFMZcdE73eKGaFRSnHtfvLBGiOqupjy8tSgL4hbfsgouO5fcdq0MRsgOf4lrS10zx3FkZC0sFhPu9yYSE5jrYA+hA4IsBbVUTWfDUys0meMo62buKFxvl1W2wD9zdPql14whGLqcyiqrePxvpZfQ13vKM96D41TfcRXue/Gk5R/OysjZ9NyZ3iEy4vtFzVBXufuIm8BpSfw4Vsvyw31xJ+oTt7nHVeHeNv4YuqCSaUAKXj2rwoqbrdCNvnJp8p4jI2gwypAJhTDZgZCHbh8JpftPInYAPYsdQOiKrtU0BhkbRb2Y6H+cxfVcgWP6v1Ab6uiviGeZi4PyNX+4pZsRlCgZKVedWB5hvl54u9zIqruTu91qaX2UEhEsio7IwtE5R9u9T83aXrDWrRcrGMLp2v4j4zZEIIC6pd0hiATvltXIO0ST6VnfDZ7ZNYzhxY+4eEq2DnRv8D2bLrRAgwO6kEuUNMZeoTXzaGxqrgsSQiOuGdUD9odZVDWeXygZuAobqkJW3iYMVDpP1GPlSMpyOvnNkQE3RNC8umOu44+vknXxO1xucUUbwl/fwJA4FAkTXp6Xz4+mgBXZ78vXo49OMWw1LQcQt/ZX2nuD66z5HyCrQGvEjOF0Py3Mb20kPJp1XBtONfrHO3Icxf7IWs7GkQ5dvtmA0M8VIc/pUXlcICimz+xANQgd9F433DHYjnEPuo+vQr0XZvaj4K4e+7c2fSy2KHfMEMs8e4dOtZ5Sav1RAFlQWiEuGW0jh1fDAkKb7jl5gNo25en81RJD3zKKBKZxT8UCIHH/W+bH0fSKOQU5bOIpynJcuaTKB87v/f1VvVPcByGoO2l5biWPknK7JYrwZMQI3otj9HBj9Jv53eg8Mux/cF+kIoUl75FnLKNlcoLSo5eRak/+O736DV8O3xhploDD6i9tLZ3sQJX9TVEp+rzHDlY9mLPZu0bjIaR/39ltOsVfpXWBYkD7XaeFgQqoIoHQflYiymP+y4G9UujNdbePvYNq5yl1+m+YbXpKZhjJ3lqsFYO6DAZk5
q5w14fCm6ITBTyeC4zCQmP1hXRDwnlnprEdDi3FP704TDdKWNmrGuqaoijgjlbpcWUAAe06J4qKdcw63HQ6d1vYUhV0ia3CntztdpaChz/r8dV9ChbNrQwX8tvUWevvHfjorbJwM1fYmwnoF3AyuJgQIicb4bgZQzxWICSb9m1F6qpu6J2M6s8aXHQPG1Z5i/je1igbmBcXezht3/9csEQWl8v5h+UJGS3oRA60fr9IHDclV14SEFxYF9G1BokNNbRsP1myeeF1e25ymshjgNUOaDkFk0sQl1D8+IOWO1NS4f5IGqsdUQliP8zvc5rFLyqhLeIcKJz4PPcfjrYMeZ1ASA/R14RlQ0EhrmPvjYKlm5EBsWwmT8lmFqyRJZpRpYFeZwR39hfXbTfpNPJkRRyokgr4Nx2P0NHytOhTv1oWTS/+HAdQwzARMw9XTwe9UNQ4nmFSbOThOJqQvAR8OST+Q9fCIIgf5Hg1lmWi9TAqYqPo09Lb4Kk4hYTibxfvY+wfeC2FlO/26VoclcNHVriJ9/ot4pBJ+XK/ap33FBpXpKQyhoskFszcS7wTwZzNK9q50eERVb1XEHDYScLTgUIFrTFXkNA5x7KS2DXifGx6HXO0WVsBGrX4iCvpt3IBYay4WVZPWSCK4iUOOELRouuciAA8PbLQ5IOAiWLepLTX5+Xp1fRNz42F0vrpX9YPKPvP6HZA0/z62JY6ktPr+2W+iNe8ZUTur0NoQd7s+sQvw9GaZxD0CBOvExfunJWGxXCz7o9a7xRwasV8gCwNcXsxslp5AMudrYssGcUQQ/6XuFbVvxf9xHRGM/aimGUwRoxio3Tr2aKWJgqrRQJmKpvyeRZD0mySkLYAFLz8Nlq0AOO9JKMJpDyfCOX9Rqw7GJ82sGoUZhrwGbwjEiXra3fD1R4CChvXcTnWniavR3P9bR9Uq71AIM+jLeqSfDhSJO/m8d85FVY+avd/5pM+hQkUsi+KI+UoV/VPL67y9P/r45XCFujhrKn+RN2" - +# weaved multiple times. 
steps: - label: ":hammer: {PATH}" key: "tutorial-{SANITIZED_PATH}" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} plugins: + - staticfloat/cryptic#v2: + variables: + - BUILDKITE_S3_ACCESS_KEY_ID="U2FsdGVkX1/ckce1vUF8A17rHLxcAlAou4aokaeS8YL6omsA1Vq1IDZko5cL1Z+t" + - BUILDKITE_S3_SECRET_ACCESS_KEY="U2FsdGVkX1+SPF81nkK7KQ64DsafSl0qq2iG7BsQs1xlTYEtZV3MqQl3l/NWaiocaEywZZFbAB5zpnKPD0xHTQ==" + - BUILDKITE_S3_DEFAULT_REGION="U2FsdGVkX1/cORlxhXcxhja2JkqC0f8RmaGYxvGBbEg=" - JuliaCI/julia#v1: - version: 1.6 - - staticfloat/coppermind: + version: 1.8 + - staticfloat/sandbox: + rootfs_url: "https://jc-rootfs-images.s3.amazonaws.com/aws_uploader-2021-11-12.x86_64.tar.gz" + rootfs_treehash: "986217e5b36efd3b3b91ed90df8e36d628cf543f" + workspaces: + # Include the julia we just downloaded + - "/cache/julia-buildkite-plugin:/cache/julia-buildkite-plugin" + - staticfloat/coppermind#v1: inputs: # We are sensitive to the actual tutorial changing - {PATH} @@ -27,45 +35,58 @@ steps: # We are sensitive to our overall dependencies changing - ./*.toml outputs: - - html/**/*.html + #- html/**/*.html + - markdown/**/figures/*.png - markdown/**/*.md - notebook/**/*.ipynb - pdf/**/*.pdf - script/**/*.jl s3_prefix: s3://julialang-buildkite-artifacts/scimltutorials - timeout_in_minutes: 360 + timeout_in_minutes: 1000 commands: | # Instantiate, to install the overall project dependencies echo "--- Instantiate" - julia --project=. -e 'using Pkg; Pkg.instantiate()' + julia --project=. -e 'using Pkg; Pkg.instantiate(); Pkg.build()' # Run tutorial echo "+++ Run tutorial for {PATH}" julia --project=. 
weave_tutorials.jl "{PATH}" - agents: - queue: "juliacpu" - fastcpu: true - label: ":rocket: Publish {PATH}" + env: + BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET: ${BUILDKITE_PLUGIN_CRYPTIC_BASE64_SIGNED_JOB_ID_SECRET?} plugins: + - staticfloat/cryptic#v2: + variables: + - BUILDKITE_S3_ACCESS_KEY_ID="U2FsdGVkX1/ckce1vUF8A17rHLxcAlAou4aokaeS8YL6omsA1Vq1IDZko5cL1Z+t" + - BUILDKITE_S3_SECRET_ACCESS_KEY="U2FsdGVkX1+SPF81nkK7KQ64DsafSl0qq2iG7BsQs1xlTYEtZV3MqQl3l/NWaiocaEywZZFbAB5zpnKPD0xHTQ==" + - BUILDKITE_S3_DEFAULT_REGION="U2FsdGVkX1/cORlxhXcxhja2JkqC0f8RmaGYxvGBbEg=" + files: + - .buildkite/ssh_deploy.key + - JuliaCI/julia#v1: + version: 1.8 + - staticfloat/sandbox: + rootfs_url: "https://jc-rootfs-images.s3.amazonaws.com/aws_uploader-2021-11-12.x86_64.tar.gz" + rootfs_treehash: "986217e5b36efd3b3b91ed90df8e36d628cf543f" + workspaces: + # Include the julia we just downloaded + - "/cache/julia-buildkite-plugin:/cache/julia-buildkite-plugin" # Use coppermind to download the tutorial results that were calculated in the # weaving job above. Note we still list `outputs` here, since we have the # option to extract only a subset of them here. - - staticfloat/coppermind: + - staticfloat/coppermind#v1: input_from: "tutorial-{SANITIZED_PATH}" outputs: - - html/**/*.html + #- html/**/*.html + - markdown/**/figures/*.png - markdown/**/*.md - notebook/**/*.ipynb - pdf/**/*.pdf - script/**/*.jl s3_prefix: s3://julialang-buildkite-artifacts/scimltutorials - staticfloat/ssh-agent: - keyvars: - - "SSH_DEPLOY_KEY" - agents: - queue: "juliacpu" - fastcpu: true + keyfiles: + - .buildkite/ssh_deploy.key commands: .buildkite/publish_tutorials_output.sh # Don't run this unless we're on the master branch, and not until the actual weave # command has had a chance to run. 
diff --git a/.buildkite/ssh_deploy.key.encrypted b/.buildkite/ssh_deploy.key.encrypted new file mode 100644 index 00000000..9e0edc3a Binary files /dev/null and b/.buildkite/ssh_deploy.key.encrypted differ diff --git a/.buildkite/test_sciml.yml b/.buildkite/test_sciml.yml index 764db722..ca906117 100644 --- a/.buildkite/test_sciml.yml +++ b/.buildkite/test_sciml.yml @@ -1,8 +1,12 @@ +agents: + queue: "juliaecosystem" + arch: "x86_64" + steps: - - label: ":julia: Run tests on 1.6" + - label: ":julia: Run tests on 1.8" plugins: - JuliaCI/julia#v1: - version: 1.6 + version: 1.8 - JuliaCI/julia-test#v1: timeout_in_minutes: 20 artifact_paths: @@ -17,8 +21,4 @@ steps: # Upload Julia script - "script/Testing/*.jl" agents: - queue: "juliacpu" - fastcpu: true - env: - SECRET_BUILDKITE_S3_ACCESS_KEY_ID: "KUQ9aTfZGaherfjpIVbaRcFLr8KYDxYBkbj3GESYDUQlSOcbX3Yl1nt467QVyGYI/ymLq+ryHlzh+0RMTMBkoj6dfh1OSubGw5Z6eYpSTRJ6PCcJa1L2HUMpBMHM84pAWUw9VtrUJ8XclNmZVN3OmYYRyGrl8RipP9hw7FQzk+TslAiciBuXYXrMaxReBC+lfnyDK0FWY52PzFr47+C1CkSnvG69uuJh6psIrlRpTO9OtHuhxprq9hAN21MlMwlRIOtkQyRX0zml6DdZOfeWmXyEdU+LP4QgZhHqCSo1ERulSr3LBwT2TtGdszFunmF2rRJnWiuLJheuxYTcnmak2Q==;U2FsdGVkX1+5nR8Dn6gwe40OV6pdTcn2Q8N2mUm2rkTcXIVgXcXuedLT3gJJOkVQ" - SECRET_BUILDKITE_S3_SECRET_ACCESS_KEY: "p3pz0WyMPIlBGJMI9z0tsWnPMmrr9CyqlPFhZxAgcfZXvh2AhQYgSQbdjC5kMOxar+qoYW18FOleER064d8xLM8fq0jfNVVXS8K1hsyqeDBVX0vhrCGxuOZ23hbsuL0lI9y+W9wu2CbkxHfTuhwV1twZ0w/ybYpDesiit8Wj/34Yq2lbCqefVdTMImKv9PqLtVDDzLgPMGTrFoaZCLhFKLR8Lwv9Pz5oe/RmGjzrdXEon43jVMj6spVyCWFkDnM+ePn8bvdirVvqUCuAUQoMeZBl/jFagEuru6sY2nx6t1qvqlccJYb1QMMcLk08vu7cyRq6FLaMkX9N1Dj7MsSKUQ==;U2FsdGVkX1/X8mjJqSXb7qs1efGkEwNCkx+/6dr/pCUx4AMey9DGXLHZS0tH2pfM8Pv5XyDmSvMXodyc1KZeCw==" + queue: "juliaecosystem" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..700707ce --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 
+version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml index adab1f58..c3e22990 100644 --- a/.github/workflows/CompatHelper.yml +++ b/.github/workflows/CompatHelper.yml @@ -10,7 +10,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v5 - name: Pkg.add("CompatHelper") run: julia -e 'using Pkg; Pkg.add("CompatHelper")' - name: CompatHelper.main() diff --git a/.gitignore b/.gitignore index edc7933e..06b4ac54 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,18 @@ -*.jl.cov -*.jl.*.cov -*.jl.mem .ipynb_checkpoints +*/.ipynb_checkpoints/* *.tmp *.aux *.log *.out *.tex -Manifest.toml -tutorials/**/*.html -tutorials/**/*.pdf +tmp*/ +gks.svg /*/*/jl_*/ +/Manifest.toml + +# We're going to store these in a separate repository now +html/ +script/ +pdf/ +notebook/ +markdown/ diff --git a/LICENSE.md b/LICENSE.md index 892203fa..6ec510bc 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -19,4 +19,3 @@ The SciMLTutorials.jl package is licensed under the MIT "Expat" License: > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE > SOFTWARE. 
-> diff --git a/Project.toml b/Project.toml index 41653922..d8316660 100644 --- a/Project.toml +++ b/Project.toml @@ -1,11 +1,12 @@ name = "SciMLTutorials" uuid = "30cb0354-2223-46a9-baa0-41bdcfbe0178" authors = ["Chris Rackauckas "] -version = "0.9.0" +version = "1.0.0" [deps] IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" +Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Weave = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" @@ -14,4 +15,4 @@ Weave = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" IJulia = "1.20" Plots = "1.6" Weave = "0.10" -julia = "1.4" +julia = "1.6" diff --git a/README.md b/README.md index eb88247d..10971dbd 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,13 @@ # SciMLTutorials.jl: Tutorials for Scientific Machine Learning and Differential Equations -[![Build Status](https://github.com/SciML/SciMLTutorials.jl/workflows/CI/badge.svg)](https://github.com/SciML/SciMLTutorials.jl/actions?query=workflow%3ACI) +[![Join the chat at https://julialang.zulipchat.com #sciml-bridged](https://img.shields.io/static/v1?label=Zulip&message=chat&color=9558b2&labelColor=389826)](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged) +[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](http://tutorials.sciml.ai/stable/) +[![Global Docs](https://img.shields.io/badge/docs-SciML-blue.svg)](https://docs.sciml.ai/dev/highlevels/learning_resources/#SciMLTutorials) -[![Join the chat at https://gitter.im/JuliaDiffEq/Lobby](https://badges.gitter.im/JuliaDiffEq/Lobby.svg)](https://gitter.im/JuliaDiffEq/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build status](https://badge.buildkite.com/8a39c2e1b44511eb84bdcd9019663cad757ae2479abd340508.svg)](https://buildkite.com/julialang/scimltutorials-dot-jl) + +[![ColPrac: Contributor's Guide on Collaborative Practices for Community 
Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac) +[![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) SciMLTutorials.jl holds PDFs, webpages, and interactive Jupyter notebooks showing how to utilize the software in the [SciML Scientific Machine Learning ecosystem](https://sciml.ai/). @@ -11,77 +16,81 @@ and the [devdocs](http://devdocs.sciml.ai/latest/) by providing practical examples of the concepts. For more details, please consult the docs. +#### Note: this library has been deprecated and its tutorials have been moved to the repos of the respective packages. It may be revived in the future if there is a need for longer-form tutorials! + +## Results + +To view the SciML Tutorials, go to [tutorials.sciml.ai](https://tutorials.sciml.ai/stable/). By default, this +will lead to the latest tagged version of the tutorials. To see the in-development version of the tutorials, go to +[https://tutorials.sciml.ai/dev/](https://tutorials.sciml.ai/dev/). + +Static outputs in pdf, markdown, and html reside in [SciMLTutorialsOutput](https://github.com/SciML/SciMLTutorialsOutput). + +## Video Tutorial + +[![Video Tutorial](https://user-images.githubusercontent.com/1814174/36342812-bdfd0606-13b8-11e8-9eff-ff219de909e5.PNG)](https://youtu.be/KPEqYtEd-zY) + ## Interactive Notebooks -To run the tutorials interactively via Jupyter notebooks, install the package -and open the tutorials like: +To generate the interactive notebooks, first install the SciMLTutorials, instantiate the +environment, and then run `SciMLTutorials.open_notebooks()`. 
This looks as follows: ```julia -using Pkg -pkg"add https://github.com/SciML/SciMLTutorials.jl" +]add SciMLTutorials#master +]activate SciMLTutorials +]instantiate using SciMLTutorials SciMLTutorials.open_notebooks() ``` -## Video Tutorial +The tutorials will be generated at your `pwd()` in a folder called `generated_notebooks`. -[![Video Tutorial](https://user-images.githubusercontent.com/1814174/36342812-bdfd0606-13b8-11e8-9eff-ff219de909e5.PNG)](https://youtu.be/KPEqYtEd-zY) +Note that when running the tutorials, the packages are not automatically added. Thus you +will need to add the packages manually or use the internal Project/Manifest tomls to +instantiate the correct packages. This can be done by activating the folder of the tutorials. +For example, -## Table of Contents - -- Introduction - - [Introduction to DifferentialEquations.jl through ODEs](http://tutorials.juliadiffeq.org/html/introduction/01-ode_introduction.html) - - [Detecting Stiffness and Choosing an ODE Algorithm](http://tutorials.juliadiffeq.org/html/introduction/02-choosing_algs.html) - - [Optimizing your DiffEq Code](http://tutorials.juliadiffeq.org/html/introduction/03-optimizing_diffeq_code.html) - - [Callbacks and Event Handling](http://tutorials.juliadiffeq.org/html/introduction/04-callbacks_and_events.html) - - [Formatting Plots](http://tutorials.juliadiffeq.org/html/introduction/05-formatting_plots.html) -- Exercise Sheets - - [DifferentialEquations.jl Workshop Exercises](http://tutorials.juliadiffeq.org/html/exercises/01-workshop_exercises.html) - - [DifferentialEquations.jl Workshop Exercise Solutions](http://tutorials.juliadiffeq.org/html/exercises/02-workshop_solutions.html) -- Modeling Examples - - [Classical Physics Models](http://tutorials.juliadiffeq.org/html/models/01-classical_physics.html) - - [Conditional Dosing Example](http://tutorials.juliadiffeq.org/html/models/02-conditional_dosing.html) - - [DiffEqBiological Tutorial I: 
Introduction](http://tutorials.juliadiffeq.org/html/models/03-diffeqbio_I_introduction.html) - - [DiffEqBiological Tutorial II: Network Properties API](http://tutorials.juliadiffeq.org/html/models/04-diffeqbio_II_networkproperties.html) - - [DiffEqBiological Tutorial III: Steady-States and Bifurcations](http://tutorials.juliadiffeq.org/html/models/04b-diffeqbio_III_steadystates.html) - - [Kepler Problem Orbit](http://tutorials.juliadiffeq.org/html/models/05-kepler_problem.html) - - [Spiking Neural Systems](http://tutorials.juliadiffeq.org/html/models/08-spiking_neural_systems.html) -- Advanced ODE Features - - [ModelingToolkit.jl, An IR and Compiler for Scientific Models](http://tutorials.juliadiffeq.org/html/ode_extras/01-ModelingToolkit.html) - - [Feagin's Order 10, 12, and 14 Methods](http://tutorials.juliadiffeq.org/html/ode_extras/02-feagin.html) - - [Finding Maxima and Minima of DiffEq Solutions](http://tutorials.juliadiffeq.org/html/ode_extras/03-ode_minmax.html) -- Model Inference - - [Bayesian Inference of Pendulum Parameters](http://tutorials.juliadiffeq.org/html/model_inference/01-pendulum_bayesian_inference.html) - - [Monte Carlo Parameter Estimation from Data](http://tutorials.juliadiffeq.org/html/model_inference/02-monte_carlo_parameter_estim.html) -- Type Handling - - [Solving Equations with Julia-Defined Types](http://tutorials.juliadiffeq.org/html/type_handling/01-number_types.html) - - [Numbers with Uncertainties](http://tutorials.juliadiffeq.org/html/type_handling/02-uncertainties.html) - - [Unit Check Arithmetic via Unitful.jl](http://tutorials.juliadiffeq.org/html/type_handling/03-unitful.html) -- DiffEqUncertainty - - [An Intro to Expectations via DiffEqUncertainty.jl](http://tutorials.juliadiffeq.org/html/DiffEqUncertainty/01-expectation_introduction.html) - - [Optimization Under Uncertainty with DiffEqUncertainty.jl](http://tutorials.juliadiffeq.org/html/DiffEqUncertainty/02-AD_and_optimization.html) - - [GPU-Accelerated Data-Driven Bayesian 
Uncertainty Quantification with Koopman Operators](http://tutorials.juliadiffeq.org/html/DiffEqUncertainty/03-GPU_Bayesian_Koopman.html) -- Advanced - - [A 2D Cardiac Electrophysiology Model (CUDA-accelerated PDE solver)](http://tutorials.juliadiffeq.org/html/advanced/01-beeler_reuter.html) - - [Solving Stiff Equations](http://tutorials.juliadiffeq.org/html/advanced/02-advanced_ODE_solving.html) - - [Kolmogorov Backward Equations](http://tutorials.juliadiffeq.org/html/advanced/03-kolmogorov_equations.html) -- Perturbation Theory - - [Mixed Symbolic/Numerical Methods for Perturbation Theory - Algebraic Equations](http://tutorials.juliadiffeq.org/html/perturbation/01-perturbation_algebraic.html) - - [Mixed Symbolic/Numerical Methods for Perturbation Theory - Differential Equations](http://tutorials.juliadiffeq.org/html/perturbation/02-perturbation_differential.html) +```julia +using Pkg +Pkg.activate(joinpath(pkgdir(SciMLTutorials),"tutorials","models")) +Pkg.instantiate() +``` +will add all of the packages required to run any tutorial in the `models` folder. ## Contributing -First of all, make sure that your current directory is `SciMLTutorials`. All -of the files are generated from the Weave.jl files in the `tutorials` folder. +All of the files are generated from the Weave.jl files in the `tutorials` folder. The generation process runs automatically, +and thus one does not necessarily need to test the Weave process locally. Instead, simply open a PR that adds/updates a +file in the "tutorials" folder and the PR will generate the tutorial on demand. Its artifacts can then be inspected in the +Buildkite as described below before merging. Note that it will use the Project.toml and Manifest.toml of the subfolder, so +any changes to dependencies requires that those are updated. + +### Reporting Bugs and Issues + +Report any bugs or issues at [the SciMLTutorials repository](https://github.com/SciML/SciMLTutorials.jl/issues). 
+ +### Inspecting Tutorial Results + +To see tutorial results before merging, click into the BuildKite, click onto +Artifacts, and then investigate the trained results. + +![](https://user-images.githubusercontent.com/1814174/118359358-02ddc980-b551-11eb-8a9b-24de947cefee.PNG) + +### Manually Generating Files + To run the generation process, do for example: ```julia -using Pkg, SciMLTutorials -cd(joinpath(dirname(pathof(SciMLTutorials)), "..")) -Pkg.pkg"activate ." -Pkg.pkg"instantiate" -SciMLTutorials.weave_file("introduction","01-ode_introduction.jmd") +]activate SciMLTutorials # Get all of the packages +using SciMLTutorials +SciMLTutorials.weave_file(joinpath(pkgdir(SciMLTutorials),"tutorials","models"),"01-classical_physics.jmd") +``` + +To generate all of the files in a folder, for example, run: + +```julia +SciMLTutorials.weave_folder(joinpath(pkgdir(SciMLTutorials),"tutorials","models")) ``` To generate all of the notebooks, do: @@ -90,6 +99,5 @@ To generate all of the notebooks, do: SciMLTutorials.weave_all() ``` -If you add new tutorials which require new packages, simply updating your local -environment will change the project and manifest files. When this occurs, the -updated environment files should be included in the PR. +Each of the tuturials displays the computer characteristics at the bottom of +the benchmark. 
diff --git a/docs/Project.toml b/docs/Project.toml new file mode 100644 index 00000000..dfa65cd1 --- /dev/null +++ b/docs/Project.toml @@ -0,0 +1,2 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" diff --git a/docs/extrasrc/assets/favicon.ico b/docs/extrasrc/assets/favicon.ico new file mode 100644 index 00000000..3c6bd470 Binary files /dev/null and b/docs/extrasrc/assets/favicon.ico differ diff --git a/docs/extrasrc/assets/logo.png b/docs/extrasrc/assets/logo.png new file mode 100644 index 00000000..6f4c3e26 Binary files /dev/null and b/docs/extrasrc/assets/logo.png differ diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 00000000..19fec3c8 --- /dev/null +++ b/docs/make.jl @@ -0,0 +1,36 @@ +using Documenter, SciMLTutorialsOutput + +dir = @__DIR__() * "/.." + +@show dir +@show readdir(dir) + +include("pages.jl") + +mathengine = MathJax3(Dict(:loader => Dict("load" => ["[tex]/require", "[tex]/mathtools"]), + :tex => Dict("inlineMath" => [["\$", "\$"], ["\\(", "\\)"]], + "packages" => [ + "base", + "ams", + "autoload", + "mathtools", + "require" + ]))) + +makedocs( + sitename = "The SciML Tutorials", + authors = "Chris Rackauckas", + modules = [SciMLTutorialsOutput], + clean = true, doctest = false, + format = Documenter.HTML(#analytics = "UA-90474609-3", + assets = ["assets/favicon.ico"], + canonical = "https://tutorials.sciml.ai/stable/", + mathengine = mathengine), + pages = pages +) + +deploydocs(; + repo = "github.com/SciML/SciMLTutorialsOutput", + devbranch = "main", + branch = "main" +) diff --git a/docs/pages.jl b/docs/pages.jl new file mode 100644 index 00000000..e8f8df4e --- /dev/null +++ b/docs/pages.jl @@ -0,0 +1,50 @@ +# This file assumes `dir` is the directory for the package! dir = @__DIR__() * "/.." + +dir = @__DIR__() * "/.." 
+ +cp(joinpath(dir, "markdown"), joinpath(dir, "docs", "src"), force = true) +cp(joinpath(dir, "docs", "extrasrc", "assets"), joinpath(dir, "docs", "src", "assets"), force = true) +cp(joinpath(dir, "README.md"), joinpath(dir, "docs", "src", "index.md"), force = true) +tutorialsdir = joinpath(dir, "docs", "src") + +pages = Any["SciMLTutorials.jl: Tutorials for Scientific Machine Learning (SciML), Equation Solvers, and AI for Science" => "index.md"] + +for folder in readdir(tutorialsdir) + newpages = Any[] + if folder[(end - 2):end] != ".md" && folder != "Testing" && folder != "figures" && + folder != "assets" + for file in filter(x -> x[(end - 2):end] == ".md", readdir( + joinpath(tutorialsdir, folder))) + try + filecontents = readlines(joinpath(tutorialsdir, folder, file)) + title = filecontents[3][9:(end - 1)] + + # Cut out the first 5 lines from the file to remove the Weave header stuff + open(joinpath(tutorialsdir, folder, file), "w") do output + println(output, "# $title") + for line in Iterators.drop(filecontents, 4) + println(output, line) + end + end + push!(newpages, title => joinpath(folder, file)) + catch e + @show folder, file, e + end + end + push!(pages, folder => newpages) + end +end + +# The result is in alphabetical order, change to the wanted order + +permute!(pages, + [1] +) + +names = [ + "SciMLTutorials.jl: Tutorials for Scientific Machine Learning (SciML) and Equation Solvers" +] + +for i in 1:length(pages) + pages[i] = names[i] => pages[i][2] +end diff --git a/docs/src/markdown/blank.jl b/docs/src/markdown/blank.jl new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/docs/src/markdown/blank.jl @@ -0,0 +1 @@ + diff --git a/src/SciMLTutorials.jl b/src/SciMLTutorials.jl index 8d09990e..7841c3bf 100644 --- a/src/SciMLTutorials.jl +++ b/src/SciMLTutorials.jl @@ -2,81 +2,87 @@ module SciMLTutorials using Weave, Pkg, IJulia, InteractiveUtils, Markdown -repo_directory = joinpath(@__DIR__,"..") +repo_directory = joinpath(@__DIR__, "..") 
cssfile = joinpath(@__DIR__, "..", "templates", "skeleton_css.css") latexfile = joinpath(@__DIR__, "..", "templates", "julia_tex.tpl") +default_builds = (:script, :github) -function weave_file(folder,file,build_list=(:script,:html,:pdf,:github,:notebook)) - target = joinpath(folder, file) - @info("Weaving $(target)") - - if isfile(joinpath(folder, "Project.toml")) - @info("Instantiating", folder) - Pkg.activate(folder) - Pkg.instantiate() - Pkg.build() - end - - args = Dict{Symbol,String}(:folder=>folder,:file=>file) - if :script ∈ build_list - println("Building Script") - dir = joinpath(repo_directory,"script",basename(folder)) - mkpath(dir) - tangle(target; out_path=dir) - end - if :html ∈ build_list - println("Building HTML") - dir = joinpath(repo_directory,"html",basename(folder)) - mkpath(dir) - weave(target,doctype = "md2html",out_path=dir,args=args,css=cssfile,fig_ext=".svg") - end - if :pdf ∈ build_list - println("Building PDF") - dir = joinpath(repo_directory,"pdf",basename(folder)) - mkpath(dir) - try - weave(target,doctype="md2pdf",out_path=dir,template=latexfile,args=args) - catch ex - @warn "PDF generation failed" exception=(ex, catch_backtrace()) +function weave_file(folder, file, build_list = default_builds) + target = joinpath(folder, file) + @info("Weaving $(target)") + + if isfile(joinpath(folder, "Project.toml")) && build_list != (:notebook,) + @info("Instantiating", folder) + Pkg.activate(joinpath(folder)) + Pkg.instantiate() + Pkg.build() + + @info("Printing out `Pkg.status()`") + Pkg.status() end - end - if :github ∈ build_list - println("Building Github Markdown") - dir = joinpath(repo_directory,"markdown",basename(folder)) - mkpath(dir) - weave(target,doctype = "github",out_path=dir,args=args) - end - if :notebook ∈ build_list - println("Building Notebook") - dir = joinpath(repo_directory,"notebook",basename(folder)) - mkpath(dir) - Weave.convert_doc(target,joinpath(dir,file[1:end-4]*".ipynb")) - end -end -function weave_all() - for folder 
in readdir(joinpath(repo_directory,"tutorials")) - folder == "test.jmd" && continue - weave_folder(folder) - end + args = Dict{Symbol, String}(:folder=>folder, :file=>file) + if :script ∈ build_list + println("Building Script") + dir = joinpath(repo_directory, "script", basename(folder)) + mkpath(dir) + tangle(target; out_path = dir) + end + if :html ∈ build_list + println("Building HTML") + dir = joinpath(repo_directory, "html", basename(folder)) + mkpath(dir) + weave(target, doctype = "md2html", out_path = dir, + args = args, css = cssfile, fig_ext = ".svg") + end + if :pdf ∈ build_list + println("Building PDF") + dir = joinpath(repo_directory, "pdf", basename(folder)) + mkpath(dir) + try + weave(target, doctype = "md2pdf", out_path = dir, + template = latexfile, args = args) + catch ex + @warn "PDF generation failed" exception=(ex, catch_backtrace()) + end + end + if :github ∈ build_list + println("Building Github Markdown") + dir = joinpath(repo_directory, "markdown", basename(folder)) + mkpath(dir) + weave(target, doctype = "github", out_path = dir, args = args) + end + if :notebook ∈ build_list + println("Building Notebook") + dir = joinpath(repo_directory, "notebook", basename(folder)) + mkpath(dir) + Weave.convert_doc(target, joinpath(dir, file[1:(end - 4)]*".ipynb")) + end end -function weave_folder(folder) - for file in readdir(folder) - # Skip non-`.jmd` files - if !endswith(file, ".jmd") - continue +function weave_all(build_list = default_builds) + for folder in readdir(joinpath(repo_directory, "tutorials")) + folder == "test.jmd" && continue + weave_folder(joinpath(repo_directory, "tutorials", folder), build_list) end +end - try - weave_file(folder,file) - catch e - @error(e) +function weave_folder(folder, build_list = default_builds) + for file in readdir(joinpath(folder)) + # Skip non-`.jmd` files + if !endswith(file, ".jmd") + continue + end + + try + weave_file(folder, file, build_list) + catch e + @error(e) + end end - end end -function 
tutorial_footer(folder=nothing, file=nothing) +function tutorial_footer(folder = nothing, file = nothing) display(md""" ## Appendix @@ -105,8 +111,8 @@ function tutorial_footer(folder=nothing, file=nothing) Package Information: """) - proj = sprint(io -> Pkg.status(io=io)) - mani = sprint(io -> Pkg.status(io=io, mode = Pkg.PKGMODE_MANIFEST)) + proj = sprint(io -> Pkg.status(io = io)) + mani = sprint(io -> Pkg.status(io = io, mode = Pkg.PKGMODE_MANIFEST)) md = """ ``` @@ -123,9 +129,12 @@ function tutorial_footer(folder=nothing, file=nothing) end function open_notebooks() - Base.eval(Main, Meta.parse("import IJulia")) - path = joinpath(repo_directory,"notebook") - IJulia.notebook(;dir=path) + Base.eval(Main, Meta.parse("import IJulia")) + weave_all((:notebook,)) + path = joinpath(repo_directory, "notebook") + newpath = joinpath(pwd(), "generated_notebooks") + mv(path, newpath) + IJulia.notebook(; dir = newpath) end end diff --git a/test/runtests.jl b/test/runtests.jl index 38cc0159..8d7d6e65 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,2 +1,3 @@ -using SciMLTutorials -SciMLTutorials.weave_file("Testing","test.jmd") +using SciMLTutorials +tutorials_dir = joinpath(dirname(@__DIR__), "tutorials") +SciMLTutorials.weave_file(joinpath(tutorials_dir, "Testing"), "test.jmd") diff --git a/tutorials/DiffEqUncertainty/01-expectation_introduction.jmd b/tutorials/DiffEqUncertainty/01-expectation_introduction.jmd deleted file mode 100644 index 6360431b..00000000 --- a/tutorials/DiffEqUncertainty/01-expectation_introduction.jmd +++ /dev/null @@ -1,352 +0,0 @@ ---- -title: An Intro to Expectations via DiffEqUncertainty.jl -author: Adam Gerlach ---- - -## System Model - -First, lets consider the following linear model. - -$$u' = p u$$ - -```julia -f(u,p,t) = p.*u -``` - -We then wish to solve this model on the timespan `t=0.0` to `t=10.0`, with an intial condition `u0=10.0` and parameter `p=-0.3`. 
We can then setup the differential equations, solve, and plot as follows - -```julia -using DifferentialEquations, Plots -u0 = [10.0] -p = [-0.3] -tspan = (0.0,10.0) -prob = ODEProblem(f,u0,tspan,p) -sol = solve(prob) -plot(sol) -ylims!(0.0,10.0) -``` - -However, what if we wish to consider a random initial condition? Assume `u0` is distributed uniformly from `-10.0` to `10.0`, i.e., - -```julia -using Distributions -u0_dist = [Uniform(-10.0,10.0)] -``` - -We can then run a Monte Carlo simulation of 100,000 trajectories by solving an `EnsembleProblem`. - -```julia -prob_func(prob,i,repeat) = remake(prob, u0 = rand.(u0_dist)) -ensemble_prob = EnsembleProblem(prob,prob_func=prob_func) - -ensemblesol = solve(ensemble_prob,Tsit5(),EnsembleThreads(),trajectories=100000) -``` - -Plotting the first 250 trajectories produces - -```julia -plot(ensemblesol, vars = (0,1), lw=1,alpha=0.1, label=nothing, idxs = 1:250) -``` - - -Given the ensemble solution, we can then compute the expectation of a function $g\left(\cdot\right)$ of the system state `u` at any time in the timespan, e.g. the state itself at `t=4.0` as - -```julia -g(sol) = sol(4.0) -mean([g(sol) for sol in ensemblesol]) -``` - -Alternatively, DiffEqUncertainty.jl offers a convenient interface for this type of calculation, `expectation()`. - -```julia -using DiffEqUncertainty -expectation(g, prob, u0_dist, p, MonteCarlo(), Tsit5(); trajectories=100000) -``` - -`expectation()` takes the function of interest $g$, an `ODEProblem`, the initial conditions and parameters, and an `AbstractExpectationAlgorithm`. Here we use `MonteCarlo()` to use the Monte Carlo algorithm. Note that the initial conditions and parameters can be arrays that freely mix numeric and continuous distribution types from Distributions.jl. Recall, that `u0_dist = [Uniform(-10.0,10.0)]`, while `p = [-0.3]`. 
From this specification, the expectation is solved as - -$$\mathbb{E}\left[g\left(X\right)\vert X\sim Pf\right]$$ - -where $Pf$ is the "push-forward" density of the initial joint pdf $f$ on initial conditions and parameters. - -Alternatively, we could solve the same problem using the `Koopman()` algorithm. - -```julia -expectation(g, prob, u0_dist, p, Koopman(), Tsit5()) -``` - -Being that this system is linear, we can analytically compute the solution as a deterministic ODE with its initial condition set to the expectation of the initial condition, i.e., - -$$e^{pt}\mathbb{E}\left[u_0\right]$$ - -```julia -exp(p[1]*4.0)*mean(u0_dist[1]) -``` - -We see that for this case the `Koopman()` algorithm produces a more accurate solution than `MonteCarlo()`. Not only is it more accurate, it is also much faster - -```julia -@time expectation(g, prob, u0_dist, p, MonteCarlo(), Tsit5(); trajectories=100000) -``` - -```julia -@time expectation(g, prob, u0_dist, p, Koopman(), Tsit5()) -``` - -Changing the distribution, we arrive at - -```julia -u0_dist = [Uniform(0.0,10.0)] -@time expectation(g, prob, u0_dist, p, MonteCarlo(), Tsit5(); trajectories=100_000) -``` -and -```julia -@time expectation(g, prob, u0_dist, p, Koopman(), Tsit5())[1] -``` -where the analytical solution is -```julia -exp(p[1]*4.0)*mean(u0_dist[1]) -``` - -Note that the `Koopman()` algorithm doesn't currently support infinite or semi-infinite integration domains, where the integration domain is determined by the extrema of the given distributions. So, trying to using a `Normal` distribution will produce `NaN` - -```julia -u0_dist = [Normal(3.0,2.0)] -expectation(g, prob, u0_dist, p, Koopman(), Tsit5()) -``` - -Here, the analytical solution is - -```julia -exp(p[1]*4.0)*mean(u0_dist[1]) -``` - -Using a truncated distribution will alleviate this problem. However, there is another gotcha. 
If a large majority of the probability mass of the distribution exists in a small region in the support, then the adaptive methods used to solve the expectation can "miss" the non-zero portions of the distribution and errantly return 0.0. - -```julia -u0_dist = [truncated(Normal(3.0,2.0),-1000,1000)] -expectation(g, prob, u0_dist, p, Koopman(), Tsit5()) -``` - -whereas truncating at $\pm 4\sigma$ produces the correct result -```julia -u0_dist = [truncated(Normal(3.0,2.0),-5,11)] -expectation(g, prob, u0_dist, p, Koopman(), Tsit5()) -``` - -If a large truncation is required, it is best practice to center the distribution on the truncated interval. This is because many of the underlying quadrature algorithms use the center of the interval as an evaluation point. - -```julia -u0_dist = [truncated(Normal(3.0,2.0),3-1000,3+1000)] -expectation(g, prob, u0_dist, p, Koopman(), Tsit5()) -``` - -## Vector-Valued Functions -`expectation()` can also handle vector-valued functions. Simply pass the vector-valued function and set the `nout` kwarg to the length of the vector the function returns. 
- -Here, we demonstrate this by computing the expectation of `u` at `t=4.0s` and `t=6.0s` - -```julia -g(sol) = [sol(4.0)[1], sol(6.0)[1]] -expectation(g, prob, u0_dist, p, Koopman(), Tsit5(); nout = 2) -``` -with analytical solution -```julia -exp.(p.*[4.0,6.0])*mean(u0_dist[1]) -``` - -this can be used to compute the expectation at a range of times simultaneously -```julia -saveat = tspan[1]:.5:tspan[2] -g(sol) = Matrix(sol) -mean_koop = expectation(g, prob, u0_dist, p, Koopman(), Tsit5(); nout = length(saveat), saveat=saveat) -``` - -We can then plot these values along with the analytical solution - -```julia -plot(t->exp(p[1]*t)*mean(u0_dist[1]),tspan..., xlabel="t", label="analytical") -scatter!(collect(saveat),mean_koop.u[:],marker=:o, label=nothing) -``` - -### Benefits of Using Vector-Valued Functions -In the above examples we used vector-valued expectation calculations to compute the various expectations required. Alternatively, one could simply compute multiple scalar-valued expectations. However, in most cases it is more efficient to use the vector-valued form. This is especially true when the ODE to be solved is computationally expensive. - -To demonstrate this, lets compute the expectation of $x$, $x^2$, and $x^3$ using both approaches while counting the number of times `g()` is evaluated. This is the same as the number of simulation runs required to arrive at the solution. First, consider the scalar-valued approach. Here, we follow the same method as before, but we add a counter to our function evaluation that stores the number of function calls for each expectation calculation to an array. 
- -```julia -function g(sol, power, counter) - counter[power] = counter[power] + 1 - sol(4.0)[1]^power -end - -counters = [0,0,0] -x_koop = expectation(s->g(s,1,counters), prob, u0_dist, p, Koopman(), Tsit5()) -x2_koop = expectation(s->g(s,2,counters), prob, u0_dist, p, Koopman(), Tsit5()) -x3_koop = expectation(s->g(s,3,counters), prob, u0_dist, p, Koopman(), Tsit5()) -counters -``` - -Leading to a total of `j sum(counters)` function evaluations. - -Now, lets compare this to the vector-valued approach -```julia -function g(sol, counter) - counter[1] = counter[1] + 1 - v = sol(4.0)[1] - [v, v^2, v^3] -end - -counter = [0] -expectation(s->g(s,counter), prob, u0_dist, p, Koopman(), Tsit5(); nout = 3) -counter -``` - -This is `j round(counter[1]/sum(counters)*100,digits=2)`% the number of simulations required when using scalar-valued expectations. Note how the number of evaluations used in the vector-valued form is equivelent to the maximum number of evaluations for the 3 scalar-valued expectation calls. - -## Higher-Order Moments -Leveraging this vector-valued capability, we can also efficiently compute higher-order central moments. 
- -### Variance -The variance, or 2nd central moment, of a random variable $X$ is defined as - -$$\mathrm{Var}\left(X\right)=\mathbb{E}\left[\left(X-\mu\right)^2\right]$$ - -where - -$$\mu = \mathbb{E}\left[X\right]$$ - -The expression for the variance can be expanded to - -$$\mathrm{Var}\left(X\right)=\mathbb{E}\left[X^2\right]-\mathbb{E}\left[X\right]^2$$ - -Using this, we define a function that returns the expectations of $X$ and $X^2$ as a vector-valued function and then compute the variance from these - -```julia -function g(sol) - x = sol(4.0)[1] - [x, x^2] -end - -koop = expectation(g, prob, u0_dist, p, Koopman(), Tsit5(); nout = 2) -mean_koop = koop[1] -var_koop = koop[2] - mean_koop^2 -``` - -For a linear system, we can propagate the variance analytically as - -$e^{2pt}\mathrm{Var}\left(u_0\right)$ - -```julia -exp(2*p[1]*4.0)*var(u0_dist[1]) -``` - -This can be computed at multiple time instances as well - -```julia -saveat = tspan[1]:.5:tspan[2] -g(sol) = [Matrix(sol)'; (Matrix(sol).^2)'] - -koop = expectation(g, prob, u0_dist, p, Koopman(), Tsit5(); nout = length(saveat)*2, saveat=saveat) -μ = koop.u[1:length(saveat)] -σ = sqrt.(koop.u[length(saveat)+1:end] - μ.^2) - -plot(t->exp(p[1]*t)*mean(u0_dist[1]),tspan..., ribbon = t->-sqrt(exp(2*p[1]*t)*var(u0_dist[1])), label="Analytical Mean, 1 std bounds") -scatter!(collect(saveat),μ,marker=:x, yerror = σ, c=:black, label = "Koopman Mean, 1 std bounds") -``` - -### Skewness -A similar approach can be used to compute skewness - -```julia -function g(sol) - v = sol(4.0)[1] - [v, v^2, v^3] -end - -koop = expectation(g, prob, u0_dist, p, Koopman(), Tsit5(); nout = 3) -mean_koop = koop[1] -var_koop = koop[2] - mean_koop^2 -(koop[3] - 3.0*mean_koop*var_koop - mean_koop^3) / var_koop^(3/2) -``` - -As the system is linear, we expect the skewness to be unchanged from the inital distribution. Becasue the distribution is a truncated Normal distribution centered on the mean, the true skewness is `0.0`. 
- -### nth Central Moment -DiffEqUncertainty provides a convenience function `centralmoment` around this approach for higher-order central moments. It takes an integer for the number of central moments you wish to compute. While the rest of the arguments are the same as for `expectation()`. The following will return central moments 1-5. - -```julia -g(sol) = sol(4.0)[1] -centralmoment(5, g, prob, u0_dist, p, Koopman(), Tsit5(), - ireltol = 1e-9, iabstol = 1e-9) -``` - -## Batch-Mode -It is also possible to solve the various simulations in parallel by using the `batch` kwarg and a batch-mode supported quadrature algorithm via the `quadalg` kwarg. To view the list of batch compatible quadrature algorithms, refer to [Quadrature.jl](https://github.com/SciML/Quadrature.jl). Note: Batch-mode operation is built on top of DifferentialEquation.jl's `EnsembleProblem`. See the [EnsembleProblem documentation](https://diffeq.sciml.ai/stable/features/ensemble/) for additional options. - -The default quadtrature algorithm used by `expectation()` does not support batch-mode evaluation. So, we first load dependencies for additional quadrature algorithms - -```julia -using Quadrature, Cuba -``` - -We then solve our expectation as before using a `batch=10` multi-thread parallelization via `EnsembleThreads()` of Cuba's SUAVE algorithm. However, in this case we introduce additional uncertainty in the model parameter. 
- -```julia -u0_dist = [truncated(Normal(3.0,2.0),-5,11)] -p_dist = [truncated(Normal(-.7, .1), -1,0)] - -g(sol) = sol(6.0)[1] - -expectation(g, prob, u0_dist, p_dist, Koopman(), Tsit5(), EnsembleThreads(); - quadalg = CubaSUAVE(), batch=10)[1] -``` - -Now, lets compare the performance of the batch and non-batch modes - -```julia -using BenchmarkTools - -@btime expectation(g, prob, u0_dist, p_dist, Koopman(), Tsit5(); - quadalg = CubaSUAVE())[1] -``` - -```julia -@btime expectation(g, prob, u0_dist, p_dist, Koopman(), Tsit5(), EnsembleThreads(); - quadalg = CubaSUAVE(), batch=10)[1] -``` - -It is also possible to parallelize across the GPU. However, one must be careful of the limitations of ensemble solutions with the GPU. Please refer to [DiffEqGPU.jl](https://github.com/SciML/DiffEqGPU.jl) for details. - -Here we load `DiffEqGPU` and modify our problem to use Float32 and to put the ODE in the required GPU form - -```julia -using DiffEqGPU - -function f(du, u,p,t) - @inbounds begin - du[1] = p[1]*u[1]; - end - nothing -end - -u0 = Float32[10.0] -p = Float32[-0.3] -tspan = (0.0f0,10.0f0) -prob = ODEProblem(f,u0,tspan,p) - -g(sol) = sol(6.0)[1] - -u0_dist = [truncated(Normal(3.0f0,2.0f0),-5f0,11f0)] -p_dist = [truncated(Normal(-.7f0, .1f0), -1f0,0f0)] - -@btime expectation(g, prob, u0_dist, p_dist, Koopman(), Tsit5(), EnsembleGPUArray(); - quadalg = CubaSUAVE(), batch=1000)[1] -``` - -The performance gains realized by leveraging batch GPU processing is problem dependent. In this case, the number of batch evaluations required to overcome the overhead of using the GPU exceeds the number of simulations required to converge to the quadrature solution. 
- -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/DiffEqUncertainty/02-AD_and_optimization.jmd b/tutorials/DiffEqUncertainty/02-AD_and_optimization.jmd deleted file mode 100644 index c9c7e95d..00000000 --- a/tutorials/DiffEqUncertainty/02-AD_and_optimization.jmd +++ /dev/null @@ -1,298 +0,0 @@ ---- -title: Optimization Under Uncertainty with DiffEqUncertainty.jl -author: Adam Gerlach ---- - -This tutorial gives and overview of how to leverage the efficient Koopman expectation method from DiffEqUncertainty to perform optimization under uncertainty. We demonstrate this by using a bouncing ball model with an uncertain model parameter. We also demonstrate its application to problems with probabilistic constraints, in particular a special class of constraints called chance constraints. - -## System Model -First lets consider a 2D bouncing ball, where the states are the horizontal position $x$, horizontal velocity $\dot{x}$, vertical position $y$, and vertical velocity $\dot{y}$. This model has two system parameters, acceleration due to gravity and coefficient of restitution (models energy loss when the ball impacts the ground). We can simulate such a system using `ContinuousCallback` as - -```julia -using OrdinaryDiffEq, Plots - -function ball!(du,u,p,t) - du[1] = u[2] - du[2] = 0.0 - du[3] = u[4] - du[4] = -p[1] -end - -ground_condition(u,t,integrator) = u[3] -ground_affect!(integrator) = integrator.u[4] = -integrator.p[2] * integrator.u[4] -ground_cb = ContinuousCallback(ground_condition, ground_affect!) - -u0 = [0.0,2.0,50.0,0.0] -tspan = (0.0,50.0) -p = [9.807, 0.9] - -prob = ODEProblem(ball!,u0,tspan,p) -sol = solve(prob,Tsit5(),callback=ground_cb) -plot(sol, vars=(1,3), label = nothing, xlabel="x", ylabel="y") -``` - -For this particular problem, we wish to measure the impact distance from a point $y=25$ on a wall at $x=25$. 
So, we introduce an additional callback that terminates the simulation on wall impact. - -```julia; results = "hidden" -stop_condition(u,t,integrator) = u[1] - 25.0 -stop_cb = ContinuousCallback(stop_condition, terminate!) -cbs = CallbackSet(ground_cb, stop_cb) - -tspan = (0.0, 1500.0) -prob = ODEProblem(ball!,u0,tspan,p) -sol = solve(prob,Tsit5(),callback=cbs) -``` - -To help visualize this problem, we plot as follows, where the star indicates a desired impace location -```julia -rectangle(xc, yc, w, h) = Shape(xc .+ [-w,w,w,-w]./2.0, yc .+ [-h,-h,h,h]./2.0) - -begin - plot(sol, vars=(1,3), label=nothing, lw = 3, c=:black) - xlabel!("x [m]") - ylabel!("y [m]") - plot!(rectangle(27.5, 25, 5, 50), c=:red, label = nothing) - scatter!([25],[25],marker=:star, ms=10, label = nothing,c=:green) - ylims!(0.0,50.0) -end -``` - -## Considering Uncertainty -We now wish to introduce uncertainty in `p[2]`, the coefficient of restitution. This is defined via a continuous univiate distribution from Distributions.jl. We can then run a Monte Carlo simulation of 100,000 trajectories via the `EnsembleProblem` interface. - -```julia; results = "hidden" -using Distributions - -cor_dist = truncated(Normal(0.9, 0.02), 0.9-3*0.02, 1.0) -trajectories = 100000 - -prob_func(prob,i,repeat) = remake(prob, p = [p[1], rand(cor_dist)]) -ensemble_prob = EnsembleProblem(prob,prob_func=prob_func) -ensemblesol = solve(ensemble_prob,Tsit5(),EnsembleThreads(),trajectories=trajectories, callback=cbs) - -begin # plot - plot(ensemblesol, vars = (1,3), lw=1,alpha=0.2, label=nothing, idxs = 1:350) - xlabel!("x [m]") - ylabel!("y [m]") - plot!(rectangle(27.5, 25, 5, 50), c=:red, label = nothing) - scatter!([25],[25],marker=:star, ms=10, label = nothing, c=:green) - plot!(sol, vars=(1,3), label=nothing, lw = 3, c=:black, ls=:dash) - xlims!(0.0,27.5) -end -``` - -Here, we plot the first 350 Monte Carlo simulations along with the trajectory corrresponding to the mean of the distribution (dashed line). 
- -We now wish to compute the expected squared impact distance from the star. This is called an "observation" of our system or an "observable" of interest. - -We define this observable as - -```julia -obs(sol) = abs2(sol[3,end]-25) -``` - -With the observable defined, we can compute the expected squared miss distance from our Monte Carlo simulation results as - -```julia -mean_ensemble = mean([obs(sol) for sol in ensemblesol]) -``` - -Alternatively, we can use the `Koopman()` algorithm in DiffEqUncertainty.jl to compute this expectation much more efficiently as - -```julia -using DiffEqUncertainty - -p_uncertain = [9.807, cor_dist] -expectation(obs, prob, u0, p_uncertain, Koopman(), Tsit5(); - ireltol = 1e-5, callback=cbs) -``` - -## Optimization Under Uncertainty -We now wish to optimize the initial position ($x_0,y_0$) and horizontal velocity ($\dot{x}_0$) of the system to minimize the expected squared miss distance from the star, where $x_0\in\left[-100,0\right]$, $y_0\in\left[1,3\right]$, and $\dot{x}_0\in\left[10,50\right]$. We will demonstrate this using a gradient-based optimization approach from NLopt.jl using `ForwardDiff.jl` AD through the expectation calculation. - -First, we load the required packages and define our loss function - -```julia -using NLopt, DiffEqSensitivity, ForwardDiff - -make_u0(θ) = [θ[1],θ[2],θ[3], 0.0] - -function 𝔼_loss(θ) # \bbE - u0 = make_u0(θ) - expectation(obs, prob, u0, p_uncertain, Koopman(), Tsit5(); - ireltol = 1e-5, callback=cbs)[1] -end -``` - -NLopt requires that this loss function return the loss as above, but also do an inplace update of the gradient. So, we wrap this function to put it in the form required by NLopt. - -```julia -function 𝔼_loss_nlopt(x,∇) - length(∇) > 0 ? 
ForwardDiff.gradient!(∇, 𝔼_loss,x) : nothing - 𝔼_loss(x) -end -``` - -We then optimize using the [Method of Moving Asymptotes](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#mma-method-of-moving-asymptotes-and-ccsa) algorithm (`:LD_MMA`) - -```julia -opt = Opt(:LD_MMA, 3) -opt.lower_bounds = [-100.0,1.0, 10.0] -opt.upper_bounds = [0.0,3.0, 50.0] -opt.xtol_rel = 1e-3 -opt.min_objective = 𝔼_loss_nlopt -(minf,minx,ret) = NLopt.optimize(opt, [-1.0, 2.0, 50.0]) -``` - -Let's now visualize 350 Monte Carlo simulations - -```julia -ensembleprob = EnsembleProblem(remake(prob,u0 = make_u0(minx)),prob_func=prob_func) -ensemblesol = solve(ensembleprob,Tsit5(),EnsembleThreads(), trajectories=100_000, callback=cbs) - -begin - plot(ensemblesol, vars = (1,3), lw=1,alpha=0.1, label=nothing, idxs = 1:350) - plot!(solve(remake(prob, u0=make_u0(minx)),Tsit5(), callback=cbs), - vars=(1,3),label = nothing, c=:black, lw=3,ls=:dash) - xlabel!("x [m]") - ylabel!("y [m]") - plot!(rectangle(27.5, 25, 5, 50), c=:red, label = nothing) - scatter!([25],[25],marker=:star, ms=10, label = nothing,c=:green) - ylims!(0.0,50.0) - xlims!(minx[1], 27.5) -end -``` - -Looks pretty good! But, how long did it take? Let's benchmark. - -```julia -using BenchmarkTools - -@btime NLopt.optimize($opt, $[-1.0, 2.0, 50.0]) -``` -Not bad for bound constrained optimization under uncertainty of a hybrid system! - -## Probabilistic Constraints - -With this approach we can also consider probabilistic constraints. Let us now consider a wall at $x=20$ with height 25. 
- -```julia -constraint = [20.0, 25.0] -begin - plot(rectangle(27.5, 25, 5, 50), c=:red, label = nothing) - xlabel!("x [m]") - ylabel!("y [m]") - plot!([constraint[1], constraint[1]],[0.0,constraint[2]], lw=5, c=:black, label=nothing) - scatter!([25],[25],marker=:star, ms=10, label = nothing,c=:green) - ylims!(0.0,50.0) - xlims!(minx[1], 27.5) -end -``` - -We now wish to minimize the same loss function as before, but introduce an inequality constraint such that the solution must have less than a 1% chance of colliding with the wall at $x=20$. This class of probabilistic constraints is called a chance constraint. - -To do this, we first introduce a new callback and solve the system using the previous optimal solution - -```julia -constraint_condition(u,t,integrator) = u[1] - constraint[1] -constraint_affect!(integrator) = integrator.u[3] < constraint[2] ? terminate!(integrator) : nothing -constraint_cb = ContinuousCallback(constraint_condition, constraint_affect!, save_positions=(true,false)); -constraint_cbs = CallbackSet(ground_cb, stop_cb, constraint_cb) - -ensemblesol = solve(ensembleprob,Tsit5(),EnsembleThreads(), trajectories=350, callback=constraint_cbs, maxstep=0.1) - -begin - plot(ensemblesol, vars = (1,3), lw=1,alpha=0.1, label=nothing) - plot!(solve(remake(prob, u0=make_u0(minx)),Tsit5(), callback=constraint_cbs), - vars=(1,3),label = nothing, c=:black, lw=3, ls=:dash) - - xlabel!("x [m]") - ylabel!("y [m]") - plot!(rectangle(27.5, 25, 5, 50), c=:red, label = nothing) - plot!([constraint[1], constraint[1]],[0.0,constraint[2]], lw=5, c=:black) - scatter!([25],[25],marker=:star, ms=10, label = nothing,c=:green) - ylims!(0.0,50.0) - xlims!(minx[1], 27.5) -end -``` - -That doesn't look good! - -We now need a second observable for the system. In order to compute a probability of impact, we use an indicator function for if a trajectory impacts the wall. In other words, this functions returns 1 if the trajectory hits the wall and 0 otherwise. 
- -```julia -constraint_obs(sol) = sol[1,end] ≈ constraint[1] ? one(sol[1,end]) : zero(sol[1,end]) -``` - -Using the previously computed optimal initial conditions, lets compute the probability of hitting this wall - -```julia -expectation(constraint_obs, prob, make_u0(minx), p_uncertain, Koopman(), Tsit5(); - ireltol= 1e-9, iabstol = 1e-9, callback=constraint_cbs)[1] -``` - -We then setup the constraint function for NLopt just as before. - -```julia -function 𝔼_constraint(θ) - u0 = [θ[1],θ[2],θ[3], 0.0] - expectation(constraint_obs, prob, u0, p_uncertain, Koopman(), Tsit5(), - ireltol= 1e-9, iabstol = 1e-9,callback=constraint_cbs)[1] -end - -function 𝔼_constraint_nlopt(x,∇) - length(∇) > 0 ? ForwardDiff.gradient!(∇, 𝔼_constraint,x) : nothing - 𝔼_constraint(x) - 0.01 -end -``` - -Note that NLopt requires the constraint function to be of the form $g(x) \leq 0$. Hence, why we return `𝔼_constraint(x) - 0.01` for the 1% chance constraint. - -The rest of the NLopt setup looks the same as before with the exception of adding the inequality constraint - -```julia -opt = Opt(:LD_MMA, 3) -opt.lower_bounds = [-100.0, 1.0, 10.0] -opt.upper_bounds = [0.0, 3.0, 50.0] -opt.xtol_rel = 1e-3 -opt.min_objective = 𝔼_loss_nlopt -inequality_constraint!(opt,𝔼_constraint_nlopt, 1e-5) -(minf2,minx2,ret2) = NLopt.optimize(opt, [-1.0, 2.0, 50.0]) -``` - -The probability of impacting the wall is now - -```julia -λ = 𝔼_constraint(minx2) -``` -We can check if this is within tolerance by -```julia -λ - 0.01 <= 1e-5 -``` - -Again, we plot some Monte Carlo simulations from this result as follows - -```julia -ensembleprob = EnsembleProblem(remake(prob,u0 = make_u0(minx2)),prob_func=prob_func) -ensemblesol = solve(ensembleprob,Tsit5(),EnsembleThreads(), - trajectories=350, callback=constraint_cbs) - -begin - plot(ensemblesol, vars = (1,3), lw=1,alpha=0.1, label=nothing) - plot!(solve(remake(prob, u0=make_u0(minx2)),Tsit5(), callback=constraint_cbs), - vars=(1,3),label = nothing, c=:black, lw=3, 
ls=:dash) - plot!([constraint[1], constraint[1]],[0.0,constraint[2]], lw=5, c=:black) - - xlabel!("x [m]") - ylabel!("y [m]") - plot!(rectangle(27.5, 25, 5, 50), c=:red, label = nothing) - scatter!([25],[25],marker=:star, ms=10, label = nothing,c=:green) - ylims!(0.0,50.0) - xlims!(minx[1], 27.5) -end -``` - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/DiffEqUncertainty/03-GPU_Bayesian_Koopman.jmd b/tutorials/DiffEqUncertainty/03-GPU_Bayesian_Koopman.jmd deleted file mode 100644 index cb7d182e..00000000 --- a/tutorials/DiffEqUncertainty/03-GPU_Bayesian_Koopman.jmd +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: GPU-Accelerated Data-Driven Bayesian Uncertainty Quantification with Koopman Operators -author: Chris Rackauckas ---- - -What if you have data and a general model and would like to evaluate the -probability that the fitted model outcomes would have had a given behavior? -The purpose of this tutorial is to demonstrate a fast workflow for doing exactly -this. It composes together a few different pieces of the SciML ecosystem: - -1. Parameter estimation with uncertainty with Bayesian differential equations by - integrating the differentiable differential equation solvers with the - [Turing.jl library](https://turing.ml/dev/). -2. Fast calculation of probabilistic estimates of differential equation solutions - with parametric uncertainty using the Koopman expectation. -3. GPU-acceleration of batched differential equation solves. - -Let's dive right in. 
- -## Bayesian Parameter Estimation with Uncertainty - -Let's start by importing all of the necessary libraries: - -```julia -using Turing, Distributions, DifferentialEquations -using MCMCChains, Plots, StatsPlots -using Random -using DiffEqUncertainty -using KernelDensity, DiffEqUncertainty -using Cuba, DiffEqGPU - -Random.seed!(1); -``` - -For this tutorial we will use the Lotka-Volterra equation: - -```julia -function lotka_volterra(du,u,p,t) - @inbounds begin - x = u[1] - y = u[2] - α = p[1] - β = p[2] - γ = p[3] - δ = p[4] - du[1] = (α - β*y)*x - du[2] = (δ*x - γ)*y - end -end -p = [1.5, 1.0, 3.0, 1.0] -u0 = [1.0,1.0] -prob1 = ODEProblem(lotka_volterra,u0,(0.0,10.0),p) -sol = solve(prob1,Tsit5()) -plot(sol) -``` - -From the Lotka-Volterra equation we will generate a dataset with known parameters: - -```julia -sol1 = solve(prob1,Tsit5(),saveat=0.1) -``` - -Now let's assume our dataset should have noise. We can add this noise in and -plot the noisy data against the generating set: - -```julia -odedata = Array(sol1) + 0.8 * randn(size(Array(sol1))) -plot(sol1, alpha = 0.3, legend = false); scatter!(sol1.t, odedata') -``` - -Now let's assume that all we know is the data `odedata` and the model form. -What we want to do is use the data to inform us of the parameters, but also -get a probabilistic sense of the uncertainty around our parameter estimate. This -is done via Bayesian estimation. For a full look at Bayesian estimation of -differential equations, look at the [Bayesian differential equation](https://turing.ml/dev/tutorials/10-bayesiandiffeq/) -tutorial from Turing.jl. 
- -Following that tutorial, we choose a set of priors and perform `NUTS` sampling -to arrive at the MCMC chain: - -```julia -Turing.setadbackend(:forwarddiff) - -@model function fitlv(data, prob1) - σ ~ InverseGamma(2, 3) # ~ is the tilde character - α ~ truncated(Normal(1.5,0.5),1.0,2.0) - β ~ truncated(Normal(1.2,0.5),0.5,1.5) - γ ~ truncated(Normal(3.0,0.5),2,4) - δ ~ truncated(Normal(1.0,0.5),0.5,1.5) - - p = [α,β,γ,δ] - prob = remake(prob1, p=p) - predicted = solve(prob,Tsit5(),saveat=0.1) - - for i = 1:length(predicted) - data[:,i] ~ MvNormal(predicted[i], σ) - end -end - -model = fitlv(odedata, prob1) - -# This next command runs 3 independent chains without using multithreading. -chain = mapreduce(c -> sample(model, NUTS(.45),1000), chainscat, 1:3) -``` - -This chain gives a discrete approximation to the probability distribution of our -desired quantites. We can plot the chains to see this distributions in action: - -```julia -plot(chain) -``` - -Great! From our data we have arrived at a probability distribution for the -our parameter values. - -## Evaluating Model Hypotheses with the Koopman Expectation - -Now let's try and ask a question: what is the expected value of `x` (the first -term in the differential equation) at time `t=10` given the known uncertainties -in our parameters? This is a good tutorial question because all other probabilistic -statements can be phrased similarly. Asking a question like, "what is the probability -that `x(T) > 1` at the final time `T`?", can similarly be phrased as an expected -value (probability statements are expected values of characteristic functions -which are 1 if true 0 if false). So in general, the kinds of questions we want -to ask and answer are expectations about the solutions of the differential equation. - -The trivial to solve this problem is to sample 100,000 sets of parameters from -our parameter distribution given by the Bayesian estimation, solve the ODE -100,000 times, and then take the average. 
But is 100,000 ODE solves enough? -Well it's hard to tell, and even then, the convergence of this approach is slow. -This is the Monte Carlo approach and it converges to the correct answer by -`sqrt(N)`. Slow. - -However, the [Koopman expectation](https://arxiv.org/abs/2008.08737) can converge -with much fewer points, allowing the use of higher order quadrature methods to -converge exponentially faster in many cases. To use the Koopman expectation -functionality provided by [DiffEqUncertainty.jl](https://github.com/SciML/DiffEqUncertainty.jl), -we first need to define our observable function `g`. This function designates the -thing about the solution we wish to calculate the expectation of. Thus for our -question "what is the expected value of `x`at time `t=10`?", we would simply use: - -```julia -function g(sol) - sol[1,end] -end -``` - -Now we need to use the `expectation` call, where we need to provide our initial -condition and parameters as probability distirbutions. For this case, we will use -the same constant `u0` as before. But, let's turn our Bayesian MCMC chains into -distributions through [kernel density estimation](https://github.com/JuliaStats/KernelDensity.jl) -(the plots of the distribution above are just KDE plots!). - -```julia -p_kde = [kde(vec(Array(chain[:α]))),kde(vec(Array(chain[:β]))), - kde(vec(Array(chain[:γ]))),kde(vec(Array(chain[:δ])))] -``` - -Now that we have our observable and our uncertainty distributions, let's calculate -the expected value: - -```julia -expect = expectation(g, prob1, u0, p_kde, Koopman(), Tsit5(), quadalg = CubaCuhre()) -``` - -Note how that gives the expectation and a residual for the error bound! - -```julia -expect.resid -``` - -### GPU-Accelerated Expectations - -Are we done? No, we need to add some GPUs! As mentioned earlier, probability -calculations can take quite a bit of ODE solves, so let's parallelize across -the parameters. 
[DiffEqGPU.jl](https://github.com/SciML/DiffEqGPU.jl) allows you -to GPU-parallelize across parameters by using the -[Ensemble interface](https://diffeq.sciml.ai/stable/features/ensemble/). Note that -you do not have to do any of the heavy lifting: all of the conversion to GPU -kernels is done automaticaly by simply specifying `EnsembleGPUArray` as the -ensembling method. For example: - -```julia -function lotka_volterra(du,u,p,t) - @inbounds begin - x = u[1] - y = u[2] - α = p[1] - β = p[2] - γ = p[3] - δ = p[4] - du[1] = (α - β*y)*x - du[2] = (δ*x - γ)*y - end -end -p = [1.5, 1.0, 3.0, 1.0] -u0 = [1.0,1.0] -prob = ODEProblem(lotka_volterra,u0,(0.0,10.0),p) -prob_func = (prob,i,repeat) -> remake(prob,p=rand(Float64,4).*p) -monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy=false) -@time sol = solve(monteprob,Tsit5(),EnsembleGPUArray(),trajectories=10_000,saveat=1.0f0) -``` - -Let's now use this in the ensembling method. We need to specify a `batch` for the -number of ODEs solved at the same time, and pass in our enembling method. 
The -following is a GPU-accelerated uncertainty quanitified estimate of the expectation -of the solution: - -```julia -expectation(g, prob1, u0, p_kde, Koopman(), Tsit5(), EnsembleGPUArray(), batch=100, quadalg = CubaCuhre()) -``` diff --git a/tutorials/DiffEqUncertainty/Project.toml b/tutorials/DiffEqUncertainty/Project.toml deleted file mode 100644 index f43c70d8..00000000 --- a/tutorials/DiffEqUncertainty/Project.toml +++ /dev/null @@ -1,36 +0,0 @@ -[deps] -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -Cuba = "8a292aeb-7a57-582c-b821-06e4c11590b1" -DiffEqGPU = "071ae1c0-96b5-11e9-1965-c90190d839ea" -DiffEqSensitivity = "41bf760c-e81c-5289-8e54-58b1f1f8abe2" -DiffEqUncertainty = "ef61062a-5684-51dc-bb67-a0fcdec5c97d" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -KernelDensity = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b" -MCMCChains = "c7f686f2-ff18-58e9-bc7b-31028e88f75d" -NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -Quadrature = "67601950-bd08-11e9-3c89-fd23fb4432d2" -StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" -Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -BenchmarkTools = "0.5, 0.6, 0.7, 1.0" -Cuba = "2.1" -DiffEqGPU = "1.5" -DiffEqSensitivity = "6.28" -DiffEqUncertainty = "1.5" -DifferentialEquations = "6.15" -Distributions = "0.23, 0.24, 0.25" -ForwardDiff = "0.10" -KernelDensity = "0.6" -MCMCChains = "4.4" -NLopt = "0.6" -OrdinaryDiffEq = "5.42" -Plots = "1.5" -Quadrature = "1.3" -StatsPlots = "0.14" -Turing = "0.15" diff --git a/tutorials/Testing/Manifest.toml b/tutorials/Testing/Manifest.toml new file mode 100644 index 00000000..09c3a1aa --- /dev/null +++ b/tutorials/Testing/Manifest.toml @@ -0,0 +1,878 @@ +# This file is 
machine-generated - editing it directly is not advised + +[[Adapt]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "f1b523983a58802c4695851926203b36e28f09db" +uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +version = "3.3.0" + +[[ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" + +[[Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + +[[Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[Bzip2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "c3598e525718abcc440f69cc6d5f60dda0a1b61e" +uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0" +version = "1.0.6+5" + +[[Cairo_jll]] +deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "e2f47f6d8337369411569fd45ae5753ca10394c6" +uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" +version = "1.16.0+6" + +[[ColorSchemes]] +deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random", "StaticArrays"] +git-tree-sha1 = "c8fd01e4b736013bc61b704871d20503b33ea402" +uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4" +version = "3.12.1" + +[[ColorTypes]] +deps = ["FixedPointNumbers", "Random"] +git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597" +uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" +version = "0.11.0" + +[[Colors]] +deps = ["ColorTypes", "FixedPointNumbers", "Reexport"] +git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40" +uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" +version = "0.12.8" + +[[Compat]] +deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] +git-tree-sha1 = "e4e2b39db08f967cc1360951f01e8a75ec441cab" +uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" +version = "3.30.0" + 
+[[CompilerSupportLibraries_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" + +[[Conda]] +deps = ["JSON", "VersionParsing"] +git-tree-sha1 = "299304989a5e6473d985212c28928899c74e9421" +uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" +version = "1.5.2" + +[[Contour]] +deps = ["StaticArrays"] +git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7" +uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" +version = "0.5.7" + +[[DataAPI]] +git-tree-sha1 = "dfb3b7e89e395be1e25c2ad6d7690dc29cc53b1d" +uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" +version = "1.6.0" + +[[DataStructures]] +deps = ["Compat", "InteractiveUtils", "OrderedCollections"] +git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677" +uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +version = "0.18.9" + +[[DataValueInterfaces]] +git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" +uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" +version = "1.0.0" + +[[Dates]] +deps = ["Printf"] +uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" + +[[DelimitedFiles]] +deps = ["Mmap"] +uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" + +[[Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + +[[DocStringExtensions]] +deps = ["LibGit2", "Markdown", "Pkg", "Test"] +git-tree-sha1 = "9d4f64f79012636741cf01133158a54b24924c32" +uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +version = "0.8.4" + +[[Downloads]] +deps = ["ArgTools", "LibCURL", "NetworkOptions"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" + +[[EarCut_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "92d8f9f208637e8d2d28c664051a00569c01493d" +uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5" +version = "2.1.5+1" + +[[Expat_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f" +uuid = "2e619515-83b5-522b-bb60-26c02a35a201" +version = "2.2.10+0" + +[[FFMPEG]] +deps = ["FFMPEG_jll", "x264_jll"] 
+git-tree-sha1 = "9a73ffdc375be61b0e4516d83d880b265366fe1f" +uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" +version = "0.4.0" + +[[FFMPEG_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "LibVPX_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] +git-tree-sha1 = "3cc57ad0a213808473eafef4845a74766242e05f" +uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5" +version = "4.3.1+4" + +[[FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" + +[[FixedPointNumbers]] +deps = ["Statistics"] +git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" +uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" +version = "0.8.4" + +[[Fontconfig_jll]] +deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "35895cf184ceaab11fd778b4590144034a167a2f" +uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" +version = "2.13.1+14" + +[[Formatting]] +deps = ["Printf"] +git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" +uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" +version = "0.4.2" + +[[FreeType2_jll]] +deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "cbd58c9deb1d304f5a245a0b7eb841a2560cfec6" +uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7" +version = "2.10.1+5" + +[[FriBidi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "0d20aed5b14dd4c9a2453c1b601d08e1149679cc" +uuid = "559328eb-81f9-559d-9380-de523a88c83c" +version = "1.0.5+6" + +[[GLFW_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] +git-tree-sha1 = "a199aefead29c3c2638c3571a9993b564109d45a" +uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" +version = "3.3.4+0" + +[[GR]] +deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", 
"Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test", "UUIDs"] +git-tree-sha1 = "011458b83178ac913dc4eb73b229af45bdde5d83" +uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" +version = "0.57.4" + +[[GR_jll]] +deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "90acee5c38f4933342fa9a3bbc483119d20e7033" +uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" +version = "0.57.2+0" + +[[GeometryBasics]] +deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"] +git-tree-sha1 = "4136b8a5668341e58398bb472754bff4ba0456ff" +uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326" +version = "0.3.12" + +[[Gettext_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"] +git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046" +uuid = "78b55507-aeef-58d4-861c-77aaff3498b1" +version = "0.21.0+0" + +[[Glib_jll]] +deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "47ce50b742921377301e15005c96e979574e130b" +uuid = "7746bdde-850d-59dc-9ae8-88ece973131d" +version = "2.68.1+0" + +[[Grisu]] +git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" +uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" +version = "1.0.2" + +[[HTTP]] +deps = ["Base64", "Dates", "IniFile", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] +git-tree-sha1 = "1fd26bc48f96adcdd8823f7fc300053faf3d7ba1" +uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" +version = "0.9.9" + +[[Highlights]] +deps = ["DocStringExtensions", "InteractiveUtils", "REPL"] +git-tree-sha1 = "f823a2d04fb233d52812c8024a6d46d9581904a4" +uuid = "eafb193a-b7ab-5a9e-9068-77385905fa72" +version = "0.4.5" + +[[IJulia]] +deps = ["Base64", "Conda", "Dates", 
"InteractiveUtils", "JSON", "Libdl", "Markdown", "MbedTLS", "Pkg", "Printf", "REPL", "Random", "SoftGlobalScope", "Test", "UUIDs", "ZMQ"] +git-tree-sha1 = "d8b9c31196e1dd92181cd0f5760ca2d2ffb4ac0f" +uuid = "7073ff75-c697-5162-941a-fcdaad2a7d2a" +version = "1.23.2" + +[[IniFile]] +deps = ["Test"] +git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8" +uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f" +version = "0.5.0" + +[[InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[IterTools]] +git-tree-sha1 = "05110a2ab1fc5f932622ffea2a003221f4782c18" +uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" +version = "1.3.0" + +[[IteratorInterfaceExtensions]] +git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" +uuid = "82899510-4779-5014-852e-03e436cf321d" +version = "1.0.0" + +[[JLLWrappers]] +deps = ["Preferences"] +git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e" +uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" +version = "1.3.0" + +[[JSON]] +deps = ["Dates", "Mmap", "Parsers", "Unicode"] +git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" +uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" +version = "0.21.1" + +[[JpegTurbo_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "9aff0587d9603ea0de2c6f6300d9f9492bbefbd3" +uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" +version = "2.0.1+3" + +[[LAME_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "df381151e871f41ee86cee4f5f6fd598b8a68826" +uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d" +version = "3.100.0+3" + +[[LZO_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6" +uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac" +version = "2.10.1+0" + +[[LaTeXStrings]] +git-tree-sha1 = "c7f1c695e06c01b95a67f0cd1d34994f3e7db104" +uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" +version = "1.2.1" + +[[Latexify]] +deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", 
"MacroTools", "Markdown", "Printf", "Requires"] +git-tree-sha1 = "f77a16cb3804f4a74f57e5272a6a4a9a628577cb" +uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" +version = "0.15.5" + +[[LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" + +[[LibCURL_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" + +[[LibGit2]] +deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" + +[[LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" + +[[LibVPX_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "85fcc80c3052be96619affa2fe2e6d2da3908e11" +uuid = "dd192d2f-8180-539f-9fb4-cc70b1dcf69a" +version = "1.9.0+1" + +[[Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[Libffi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "761a393aeccd6aa92ec3515e428c26bf99575b3b" +uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490" +version = "3.2.2+0" + +[[Libgcrypt_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"] +git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae" +uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4" +version = "1.8.7+0" + +[[Libglvnd_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"] +git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf" +uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29" +version = "1.3.0+3" + +[[Libgpg_error_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9" +uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8" +version = "1.42.0+0" + +[[Libiconv_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "8d22e127ea9a0917bc98ebd3755c8bd31989381e" +uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" +version = "1.16.1+0" + 
+[[Libmount_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73" +uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9" +version = "2.35.0+0" + +[[Libtiff_jll]] +deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"] +git-tree-sha1 = "291dd857901f94d683973cdf679984cdf73b56d0" +uuid = "89763e89-9b03-5906-acba-b20f662cd828" +version = "4.1.0+2" + +[[Libuuid_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066" +uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700" +version = "2.36.0+0" + +[[LinearAlgebra]] +deps = ["Libdl"] +uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" + +[[Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[MacroTools]] +deps = ["Markdown", "Random"] +git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0" +uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +version = "0.5.6" + +[[Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[MbedTLS]] +deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"] +git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe" +uuid = "739be429-bea8-5141-9913-cc70e7f3736d" +version = "1.0.3" + +[[MbedTLS_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" + +[[Measures]] +git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f" +uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" +version = "0.3.1" + +[[Missings]] +deps = ["DataAPI"] +git-tree-sha1 = "4ea90bd5d3985ae1f9a908bd4500ae88921c5ce7" +uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" +version = "1.0.0" + +[[Mmap]] +uuid = "a63ad114-7e13-5084-954f-fe012c677804" + +[[MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" + +[[Mustache]] +deps = ["Printf", "Tables"] +git-tree-sha1 = "36995ef0d532fe08119d70b2365b7b03d4e00f48" +uuid = "ffc61752-8dc7-55ee-8c37-f3e9cdd09e70" +version = "1.0.10" + +[[NaNMath]] +git-tree-sha1 = 
"bfe47e760d60b82b66b61d2d44128b62e3a369fb" +uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" +version = "0.3.5" + +[[NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" + +[[Ogg_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "a42c0f138b9ebe8b58eba2271c5053773bde52d0" +uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" +version = "1.3.4+2" + +[[OpenSSL_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "71bbbc616a1d710879f5a1021bcba65ffba6ce58" +uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" +version = "1.1.1+6" + +[[Opus_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "f9d57f4126c39565e05a2b0264df99f497fc6f37" +uuid = "91d4177d-7536-5919-b921-800302f37372" +version = "1.3.1+3" + +[[OrderedCollections]] +git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c" +uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" +version = "1.4.1" + +[[PCRE_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488" +uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc" +version = "8.44.0+0" + +[[Parsers]] +deps = ["Dates"] +git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "1.1.0" + +[[Pixman_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29" +uuid = "30392449-352a-5448-841d-b1acce4e97dc" +version = "0.40.1+0" + +[[Pkg]] +deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" + +[[PlotThemes]] +deps = ["PlotUtils", "Requires", "Statistics"] +git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d" +uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" +version = "2.0.1" + +[[PlotUtils]] +deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", 
"Statistics"] +git-tree-sha1 = "ae9a295ac761f64d8c2ec7f9f24d21eb4ffba34d" +uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" +version = "1.0.10" + +[[Plots]] +deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"] +git-tree-sha1 = "f3a57a5acc16a69c03539b3684354cbbbb72c9ad" +uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" +version = "1.15.2" + +[[Preferences]] +deps = ["TOML"] +git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.2.2" + +[[Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[Qt5Base_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"] +git-tree-sha1 = "16626cfabbf7206d60d84f2bf4725af7b37d4a77" +uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1" +version = "5.15.2+0" + +[[REPL]] +deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] +uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" + +[[Random]] +deps = ["Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[RecipesBase]] +git-tree-sha1 = "b3fb709f3c97bfc6e948be68beeecb55a0b340ae" +uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" +version = "1.1.1" + +[[RecipesPipeline]] +deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"] +git-tree-sha1 = "7a5026a6741c14147d1cb6daf2528a77ca28eb51" +uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" +version = "0.3.2" + +[[Reexport]] +git-tree-sha1 = "57d8440b0c7d98fc4f889e478e80f268d534c9d5" +uuid = 
"189a3867-3050-52da-a836-e630ba90ab69" +version = "1.0.0" + +[[Requires]] +deps = ["UUIDs"] +git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621" +uuid = "ae029012-a4dd-5104-9daa-d747884805df" +version = "1.1.3" + +[[SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" + +[[SciMLTutorials]] +deps = ["IJulia", "InteractiveUtils", "Pkg", "Plots", "Weave"] +git-tree-sha1 = "6d721be72323edd91679318c05aca8479bc7b20f" +uuid = "30cb0354-2223-46a9-baa0-41bdcfbe0178" +version = "0.9.0" + +[[Scratch]] +deps = ["Dates"] +git-tree-sha1 = "ad4b278adb62d185bbcb6864dc24959ab0627bf6" +uuid = "6c6a2e73-6563-6170-7368-637461726353" +version = "1.0.3" + +[[Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[SharedArrays]] +deps = ["Distributed", "Mmap", "Random", "Serialization"] +uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" + +[[Showoff]] +deps = ["Dates", "Grisu"] +git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de" +uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" +version = "1.0.3" + +[[Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[SoftGlobalScope]] +deps = ["REPL"] +git-tree-sha1 = "986ec2b6162ccb95de5892ed17832f95badf770c" +uuid = "b85f4697-e234-5449-a836-ec8e2f98b302" +version = "1.1.0" + +[[SortingAlgorithms]] +deps = ["DataStructures"] +git-tree-sha1 = "2ec1962eba973f383239da22e75218565c390a96" +uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" +version = "1.0.0" + +[[SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[StaticArrays]] +deps = ["LinearAlgebra", "Random", "Statistics"] +git-tree-sha1 = "c635017268fd51ed944ec429bcc4ad010bcea900" +uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "1.2.0" + +[[Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + +[[StatsAPI]] +git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510" +uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" +version = "1.0.0" + +[[StatsBase]] +deps = ["DataAPI", 
"DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] +git-tree-sha1 = "2f6792d523d7448bbe2fec99eca9218f06cc746d" +uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" +version = "0.33.8" + +[[StructArrays]] +deps = ["Adapt", "DataAPI", "Tables"] +git-tree-sha1 = "44b3afd37b17422a62aea25f04c1f7e09ce6b07f" +uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" +version = "0.5.1" + +[[TOML]] +deps = ["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" + +[[TableTraits]] +deps = ["IteratorInterfaceExtensions"] +git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39" +uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" +version = "1.0.1" + +[[Tables]] +deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"] +git-tree-sha1 = "c9d2d262e9a327be1f35844df25fe4561d258dc9" +uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" +version = "1.4.2" + +[[Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" + +[[Test]] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[URIs]] +git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355" +uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" +version = "1.3.0" + +[[UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" + +[[Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" + +[[VersionParsing]] +git-tree-sha1 = "80229be1f670524750d905f8fc8148e5a8c4537f" +uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" +version = "1.2.0" + +[[Wayland_jll]] +deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"] +git-tree-sha1 = "dc643a9b774da1c2781413fd7b6dcd2c56bb8056" +uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89" +version = "1.17.0+4" + +[[Wayland_protocols_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll"] +git-tree-sha1 = 
"2839f1c1296940218e35df0bbb220f2a79686670" +uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91" +version = "1.18.0+4" + +[[Weave]] +deps = ["Base64", "Dates", "Highlights", "JSON", "Markdown", "Mustache", "Pkg", "Printf", "REPL", "Requires", "Serialization", "YAML"] +git-tree-sha1 = "4afd286cd80d1c2c338f9a13356298feac7348d0" +uuid = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" +version = "0.10.8" + +[[XML2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "1acf5bdf07aa0907e0a37d3718bb88d4b687b74a" +uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" +version = "2.9.12+0" + +[[XSLT_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"] +git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a" +uuid = "aed1982a-8fda-507f-9586-7b0439959a61" +version = "1.1.34+0" + +[[Xorg_libX11_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] +git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527" +uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc" +version = "1.6.9+4" + +[[Xorg_libXau_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e" +uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec" +version = "1.0.9+4" + +[[Xorg_libXcursor_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"] +git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd" +uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724" +version = "1.2.0+4" + +[[Xorg_libXdmcp_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4" +uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05" +version = "1.1.3+4" + +[[Xorg_libXext_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3" +uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3" 
+version = "1.3.4+4" + +[[Xorg_libXfixes_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4" +uuid = "d091e8ba-531a-589c-9de9-94069b037ed8" +version = "5.0.3+4" + +[[Xorg_libXi_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"] +git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246" +uuid = "a51aa0fd-4e3c-5386-b890-e753decda492" +version = "1.7.10+4" + +[[Xorg_libXinerama_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"] +git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123" +uuid = "d1454406-59df-5ea1-beac-c340f2130bc3" +version = "1.1.4+4" + +[[Xorg_libXrandr_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"] +git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631" +uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484" +version = "1.5.2+4" + +[[Xorg_libXrender_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96" +uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" +version = "0.9.10+4" + +[[Xorg_libpthread_stubs_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb" +uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74" +version = "0.1.0+3" + +[[Xorg_libxcb_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] +git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6" +uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b" +version = "1.13.0+3" + +[[Xorg_libxkbfile_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] +git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2" +uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a" +version = "1.1.0+4" + +[[Xorg_xcb_util_image_jll]] +deps = ["Artifacts", "JLLWrappers", 
"Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97" +uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b" +version = "0.4.0+1" + +[[Xorg_xcb_util_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"] +git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1" +uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5" +version = "0.4.0+1" + +[[Xorg_xcb_util_keysyms_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00" +uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7" +version = "0.4.0+1" + +[[Xorg_xcb_util_renderutil_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e" +uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e" +version = "0.3.9+1" + +[[Xorg_xcb_util_wm_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] +git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67" +uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361" +version = "0.4.1+1" + +[[Xorg_xkbcomp_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"] +git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b" +uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4" +version = "1.4.2+4" + +[[Xorg_xkeyboard_config_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"] +git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d" +uuid = "33bec58e-1273-512f-9401-5d533626f822" +version = "2.27.0+4" + +[[Xorg_xtrans_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845" +uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10" +version = "1.4.0+3" + +[[YAML]] +deps = ["Base64", "Dates", "Printf"] +git-tree-sha1 = "78c02bd295bbd0ca330f95e07ccdfcb69f6cbcd4" +uuid = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" +version = "0.4.6" + +[[ZMQ]] +deps = ["FileWatching", "Sockets", "ZeroMQ_jll"] 
+git-tree-sha1 = "fc68e8a3719166950a0f3e390a14c7302c48f8de" +uuid = "c2297ded-f4af-51ae-bb23-16f91089e4e1" +version = "1.2.1" + +[[ZeroMQ_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "libsodium_jll"] +git-tree-sha1 = "74a74a3896b63980734cc876da8a103454559fe8" +uuid = "8f1865be-045e-5c20-9c9f-bfbfb0764568" +version = "4.3.2+6" + +[[Zlib_jll]] +deps = ["Libdl"] +uuid = "83775a58-1f1d-513f-b197-d71354ab007a" + +[[Zstd_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6" +uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" +version = "1.5.0+0" + +[[libass_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "acc685bcf777b2202a904cdcb49ad34c2fa1880c" +uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" +version = "0.14.0+4" + +[[libfdk_aac_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "7a5780a0d9c6864184b3a2eeeb833a0c871f00ab" +uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280" +version = "0.1.6+4" + +[[libpng_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c" +uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f" +version = "1.6.38+0" + +[[libsodium_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "848ab3d00fe39d6fbc2a8641048f8f272af1c51e" +uuid = "a9144af2-ca23-56d9-984f-0d03f7b5ccf8" +version = "1.0.20+0" + +[[libvorbis_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"] +git-tree-sha1 = "fa14ac25af7a4b8a7f61b287a124df7aab601bcd" +uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a" +version = "1.3.6+6" + +[[nghttp2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" + +[[p7zip_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" + +[[x264_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = 
"d713c1ce4deac133e3334ee12f4adff07f81778f" +uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a" +version = "2020.7.14+2" + +[[x265_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "487da2f8f2f0c8ee0e83f39d13037d6bbf0a45ab" +uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76" +version = "3.0.0+3" + +[[xkbcommon_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"] +git-tree-sha1 = "ece2350174195bb31de1a63bea3a41ae1aa593b6" +uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd" +version = "0.9.1+5" diff --git a/tutorials/Testing/Project.toml b/tutorials/Testing/Project.toml index 29dbc2f3..9c4e0a35 100644 --- a/tutorials/Testing/Project.toml +++ b/tutorials/Testing/Project.toml @@ -1,2 +1,5 @@ [deps] SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" + +[compat] +SciMLTutorials = "0.9, 1" diff --git a/tutorials/Testing/test.jmd b/tutorials/Testing/test.jmd index a6764948..4a909381 100644 --- a/tutorials/Testing/test.jmd +++ b/tutorials/Testing/test.jmd @@ -1,11 +1,11 @@ ---- -title: Test -author: Chris Rackauckas ---- - -This is a test of the builder system. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` +--- +title: Test +author: Chris Rackauckas +--- + +This is a test of the builder system. It often gets bumped manually. 
+ +```julia, echo = false, skip="notebook" +using SciMLTutorials +SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder], WEAVE_ARGS[:file]) +``` diff --git a/tutorials/advanced/01-beeler_reuter.jmd b/tutorials/advanced/01-beeler_reuter.jmd deleted file mode 100644 index 3c935d9f..00000000 --- a/tutorials/advanced/01-beeler_reuter.jmd +++ /dev/null @@ -1,661 +0,0 @@ ---- -title: An Implicit/Explicit CUDA-Accelerated Solver for the 2D Beeler-Reuter Model -author: Shahriar Iravanian ---- - -## Background - -[SciML](https://github.com/SciML) is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). *SciML* provides a large number of explicit and implicit solvers suited for different types of ODE problems. It is possible to reduce a system of partial differential equations into an ODE problem by employing the [method of lines (MOL)](https://en.wikipedia.org/wiki/Method_of_lines). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. [Solving Systems of Stochastic PDEs and using GPUs in Julia](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/) is a brief introduction to MOL and using GPUs to accelerate PDE solving in *JuliaDiffEq*. Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl) libraries to run the explicit part of the model on a GPU. 
- -Note that this tutorial does not use the [higher order IMEX methods built into DifferentialEquations.jl](https://docs.sciml.ai/latest/solvers/split_ode_solve/#Implicit-Explicit-(IMEX)-ODE-1) but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios. - -There are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic [Hodgkin-Huxley model](https://en.wikipedia.org/wiki/Hodgkin%E2%80%93Huxley_model) and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE, - -$$\partial V / \partial t = \nabla (D \nabla V) - \frac {I_\text{ion}} {C_m},$$ - -where $V$ is the transmembrane potential, $D$ is a diffusion tensor, $I_\text{ion}$ is the sum of the transmembrane currents and is calculated from the ODEs, and $C_m$ is the membrane capacitance and is usually assumed to be constant. Here we model a uniform and isotropic medium. Therefore, the model can be simplified to, - -$$\partial V / \partial t = D \Delta{V} - \frac {I_\text{ion}} {C_m},$$ - -where $D$ is now a scalar. By nature, these models have to deal with different time scales and are therefore classified as *stiff*. Commonly, they are solved using the explicit Euler method, usually with a closed form for the integration of the gating variables (the Rush-Larsen method, see below). We can also solve these problems using implicit or semi-implicit PDE solvers (e.g., the [Crank-Nicholson method](https://en.wikipedia.org/wiki/Crank%E2%80%93Nicolson_method) combined with an iterative solver). 
Higher order explicit methods such as Runge-Kutta and linear multi-step methods cannot overcome the stiffness and are not particularly helpful. - -In this tutorial, we first develop a CPU-only IMEX solver and then show how to move the explicit part to a GPU. - -### The Beeler-Reuter Model - -We have chosen the [Beeler-Reuter ventricular ionic model](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1283659/) as our example. It is a classic model first described in 1977 and is used as a base for many other ionic models. It has eight state variables, which makes it complicated enough to be interesting without obscuring the main points of the exercise. The eight state variables are: the transmembrane potential ($V$), sodium-channel activation and inactivation gates ($m$ and $h$, similar to the Hodgkin-Huxley model), with an additional slow inactivation gate ($j$), calcium-channel activation and deactivations gates ($d$ and $f$), a time-dependent inward-rectifying potassium current gate ($x_1$), and intracellular calcium concentration ($c$). There are four currents: a sodium current ($i_{Na}$), a calcium current ($i_{Ca}$), and two potassium currents, one time-dependent ($i_{x_1}$) and one background time-independent ($i_{K_1}$). - -## CPU-Only Beeler-Reuter Solver - -Let's start by developing a CPU only IMEX solver. The main idea is to use the *DifferentialEquations* framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. If no analytical approximation was known for the explicit part, one could use methods from [this list](https://docs.sciml.ai/latest/solvers/split_ode_solve/#Implicit-Explicit-(IMEX)-ODE-1). 
- -First, we define the model constants: - -```julia -const v0 = -84.624 -const v1 = 10.0 -const C_K1 = 1.0f0 -const C_x1 = 1.0f0 -const C_Na = 1.0f0 -const C_s = 1.0f0 -const D_Ca = 0.0f0 -const D_Na = 0.0f0 -const g_s = 0.09f0 -const g_Na = 4.0f0 -const g_NaC = 0.005f0 -const ENa = 50.0f0 + D_Na -const γ = 0.5f0 -const C_m = 1.0f0 -``` - -Note that the constants are defined as `Float32` and not `Float64`. The reason is that most GPUs have many more single precision cores than double precision ones. To ensure uniformity between CPU and GPU, we also code most states variables as `Float32` except for the transmembrane potential, which is solved by an implicit solver provided by the Sundial library and needs to be `Float64`. - -### The State Structure - -Next, we define a struct to contain our state. `BeelerReuterCpu` is a functor and we will define a deriv function as its associated function. - -```julia -mutable struct BeelerReuterCpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - C::Array{Float32, 2} # intracellular calcium concentration - M::Array{Float32, 2} # sodium current activation gate (m) - H::Array{Float32, 2} # sodium current inactivation gate (h) - J::Array{Float32, 2} # sodium current slow inactivaiton gate (j) - D::Array{Float32, 2} # calcium current activaiton gate (d) - F::Array{Float32, 2} # calcium current inactivation gate (f) - XI::Array{Float32, 2} # inward-rectifying potassium current (iK1) - - Δu::Array{Float64, 2} # place-holder for the Laplacian - - function BeelerReuterCpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.C = fill(0.0001f0, (ny,nx)) - self.M = fill(0.01f0, (ny,nx)) - self.H = fill(0.988f0, (ny,nx)) - self.J = fill(0.975f0, (ny,nx)) - self.D = fill(0.003f0, (ny,nx)) - self.F = fill(0.994f0, (ny,nx)) - self.XI = fill(0.0001f0, (ny,nx)) - - self.Δu = zeros(ny,nx) - - return self - 
end -end -``` - -### Laplacian - -The finite-difference Laplacian is calculated in-place by a 5-point stencil. The Neumann boundary condition is enforced. Note that we could have also used [DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to automate this step. - -```julia -# 5-point stencil -function laplacian(Δu, u) - n1, n2 = size(u) - - # internal nodes - for j = 2:n2-1 - for i = 2:n1-1 - @inbounds Δu[i,j] = u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] - 4*u[i,j] - end - end - - # left/right edges - for i = 2:n1-1 - @inbounds Δu[i,1] = u[i+1,1] + u[i-1,1] + 2*u[i,2] - 4*u[i,1] - @inbounds Δu[i,n2] = u[i+1,n2] + u[i-1,n2] + 2*u[i,n2-1] - 4*u[i,n2] - end - - # top/bottom edges - for j = 2:n2-1 - @inbounds Δu[1,j] = u[1,j+1] + u[1,j-1] + 2*u[2,j] - 4*u[1,j] - @inbounds Δu[n1,j] = u[n1,j+1] + u[n1,j-1] + 2*u[n1-1,j] - 4*u[n1,j] - end - - # corners - @inbounds Δu[1,1] = 2*(u[2,1] + u[1,2]) - 4*u[1,1] - @inbounds Δu[n1,1] = 2*(u[n1-1,1] + u[n1,2]) - 4*u[n1,1] - @inbounds Δu[1,n2] = 2*(u[2,n2] + u[1,n2-1]) - 4*u[1,n2] - @inbounds Δu[n1,n2] = 2*(u[n1-1,n2] + u[n1,n2-1]) - 4*u[n1,n2] -end -``` - -### The Rush-Larsen Method - -We use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the [IMEX solvers documentation](https://docs.sciml.ai/latest/solvers/split_ode_solve/#Implicit-Explicit-(IMEX)-ODE-1). While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest. 
- -The [Rush-Larsen](https://ieeexplore.ieee.org/document/4122859/) method replaces the explicit Euler integration for the gating variables with direct integration. The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs, - -$$\frac{dg}{dt} = \alpha(V) (1 - g) - \beta(V) g$$ - -where $g$ is a generic gating variable, ranging from 0 to 1, and $\alpha$ and $\beta$ are reaction rates. This equation can be written as, - -$$\frac{dg}{dt} = (g_{\infty} - g) / \tau_g,$$ - -where $g_\infty$ and $\tau_g$ are - -$$g_{\infty} = \frac{\alpha}{(\alpha + \beta)},$$ - -and, - -$$\tau_g = \frac{1}{(\alpha + \beta)}.$$ - -Assuing that $g_\infty$ and $\tau_g$ are constant for the duration of a single time step ($\Delta{t}$), which is a reasonable assumption for most cardiac models, we can integrate directly to have, - -$$g(t + \Delta{t}) = g_{\infty} - \left(g_{\infty} - g(\Delta{t})\right)\,e^{-\Delta{t}/\tau_g}.$$ - -This is the Rush-Larsen technique. Note that as $\Delta{t} \rightarrow 0$, this equations morphs into the explicit Euler formula, - -$$g(t + \Delta{t}) = g(t) + \Delta{t}\frac{dg}{dt}.$$ - -`rush_larsen` is a helper function that use the Rush-Larsen method to integrate the gating variables. - -```julia -@inline function rush_larsen(g, α, β, Δt) - inf = α/(α+β) - τ = 1f0 / (α+β) - return clamp(g + (g - inf) * expm1(-Δt/τ), 0f0, 1f0) -end -``` - -The gating variables are updated as below. The details of how to calculate $\alpha$ and $\beta$ are based on the Beeler-Reuter model and not of direct interest to this tutorial. - -```julia -function update_M_cpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 
10.0f0 : -(v+47.0f0) / (exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * exp(-0.056f0*(v+72.0f0))) - return rush_larsen(g, α, β, Δt) -end - -function update_H_cpu(g, v, Δt) - α = 0.126f0 * exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_J_cpu(g, v, Δt) - α = (0.55f0 * exp(-0.25f0*(v+78.0f0))) / (exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_D_cpu(g, v, Δt) - α = γ * (0.095f0 * exp(-0.01f0*(v-5.0f0))) / (exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * exp(-0.017f0*(v+44.0f0))) / (exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_F_cpu(g, v, Δt) - α = γ * (0.012f0 * exp(-0.008f0*(v+28.0f0))) / (exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * exp(-0.02f0*(v+30.0f0))) / (exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end - -function update_XI_cpu(g, v, Δt) - α = (0.0005f0 * exp(0.083f0*(v+50.0f0))) / (exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * exp(-0.06f0*(v+20.0f0))) / (exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen(g, α, β, Δt) -end -``` - -The intracelleular calcium is not technically a gating variable, but we can use a similar explicit exponential integrator for it. - -```julia -function update_C_cpu(g, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(g) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - g) - τ = 1f0 / 0.07f0 - return g + (g - inf) * expm1(-Δt/τ) -end -``` - -### Implicit Solver - -Now, it is time to define the derivative function as an associated function of **BeelerReuterCpu**. We plan to use the CVODE_BDF solver as our implicit portion. Similar to other iterative methods, it calls the deriv function with the same $t$ multiple times. 
For example, these are consecutive $t$s from a representative run: - -0.86830 -0.86830 -0.85485 -0.85485 -0.85485 -0.86359 -0.86359 -0.86359 -0.87233 -0.87233 -0.87233 -0.88598 -... - -Here, every time step is called three times. We distinguish between two types of calls to the deriv function. When $t$ changes, the gating variables are updated by calling `update_gates_cpu`: - -```julia -function update_gates_cpu(u, XI, M, H, J, D, F, C, Δt) - let Δt = Float32(Δt) - n1, n2 = size(u) - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - XI[i,j] = update_XI_cpu(XI[i,j], v, Δt) - M[i,j] = update_M_cpu(M[i,j], v, Δt) - H[i,j] = update_H_cpu(H[i,j], v, Δt) - J[i,j] = update_J_cpu(J[i,j], v, Δt) - D[i,j] = update_D_cpu(D[i,j], v, Δt) - F[i,j] = update_F_cpu(F[i,j], v, Δt) - - C[i,j] = update_C_cpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - end - end -end -``` - -On the other hand, du is updated at each time step, since it is independent of $\Delta{t}$. - -```julia -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = exp(0.04f0*(v+85f0)) - eb = exp(0.08f0*(v+53f0)) - ec = exp(0.04f0*(v+53f0)) - ed = exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 
25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = exp(0.04f0*(v+77f0)) - eb = exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end - -function update_du_cpu(du, u, XI, M, H, J, D, F, C) - n1, n2 = size(u) - - for j = 1:n2 - for i = 1:n1 - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - end - end -end -``` - -Finally, we put everything together is our deriv function, which is a call on `BeelerReuterCpu`. - -```julia -function (f::BeelerReuterCpu)(du, u, p, t) - Δt = t - f.t - - if Δt != 0 || t == 0 - update_gates_cpu(u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C, Δt) - f.t = t - end - - laplacian(f.Δu, u) - - # calculate the reaction portion - update_du_cpu(du, u, f.XI, f.M, f.H, f.J, f.D, f.F, f.C) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δu -end -``` - -### Results - -Time to test! We need to define the starting transmembrane potential with the help of global constants **v0** and **v1**, which represent the resting and activated potentials. - -```julia -const N = 192; -u0 = fill(v0, (N, N)); -u0[90:102,90:102] .= v1; # a small square in the middle of the domain -``` - -The initial condition is a small square in the middle of the domain. 
- -```julia -using Plots -heatmap(u0) -``` - -Next, the problem is defined: - -```julia -using DifferentialEquations, Sundials - -deriv_cpu = BeelerReuterCpu(u0, 1.0); -prob = ODEProblem(deriv_cpu, u0, (0.0, 50.0)); -``` - -For stiff reaction-diffusion equations, CVODE_BDF from Sundial library is an excellent solver. - -```julia -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); -``` - -```julia -heatmap(sol.u[end]) -``` - -## CPU/GPU Beeler-Reuter Solver - -GPUs are great for embarrassingly parallel problems but not so much for highly coupled models. We plan to keep the implicit part on CPU and run the decoupled explicit code on a GPU with the help of the CUDAnative library. - -### GPUs and CUDA - -It this section, we present a brief summary of how GPUs (specifically NVIDIA GPUs) work and how to program them using the Julia CUDA interface. The readers who are familiar with these basic concepts may skip this section. - -Let's start by looking at the hardware of a typical high-end GPU, GTX 1080. It has four Graphics Processing Clusters (equivalent to a discrete CPU), each harboring five Streaming Multiprocessor (similar to a CPU core). Each SM has 128 single-precision CUDA cores. Therefore, GTX 1080 has a total of 4 x 5 x 128 = 2560 CUDA cores. The maximum theoretical throughput for a GTX 1080 is reported as 8.87 TFLOPS. This figure is calculated for a boost clock frequency of 1.733 MHz as 2 x 2560 x 1.733 MHz = 8.87 TFLOPS. The factor 2 is included because two single floating point operations, a multiplication and an addition, can be done in a clock cycle as part of a fused-multiply-addition FMA operation. GTX 1080 also has 8192 MB of global memory accessible to all the cores (in addition to local and shared memory on each SM). - -A typical CUDA application has the following flow: - -1. Define and initialize the problem domain tensors (multi-dimensional arrays) in CPU memory. -2. Allocate corresponding tensors in the GPU global memory. -3. 
Transfer the input tensors from CPU to the corresponding GPU tensors. -4. Invoke CUDA kernels (i.e., the GPU functions callable from CPU) that operate on the GPU tensors. -5. Transfer the result tensors from GPU back to CPU. -6. Process tensors on CPU. -7. Repeat steps 3-6 as needed. - -Some libraries, such as [ArrayFire](https://github.com/arrayfire/arrayfire), hide the complexicities of steps 2-5 behind a higher level of abstraction. However, here we take a lower level route. By using [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl), we achieve a finer-grained control and higher performance. In return, we need to implement each step manually. - -*CuArray* is a thin abstraction layer over the CUDA API and allows us to define GPU-side tensors and copy data to and from them but does not provide for operations on tensors. *CUDAnative* is a compiler that translates Julia functions designated as CUDA kernels into ptx (a high-level CUDA assembly language). - -### The CUDA Code - -The key to fast CUDA programs is to minimize CPU/GPU memory transfers and global memory accesses. The implicit solver is currently CPU only, but it only needs access to the transmembrane potential. The rest of state variables reside on the GPU memory. - -We modify ``BeelerReuterCpu`` into ``BeelerReuterGpu`` by defining the state variables as *CuArray*s instead of standard Julia *Array*s. The name of each variable defined on GPU is prefixed by *d_* for clarity. Note that $\Delta{v}$ is a temporary storage for the Laplacian and stays on the CPU side. 
- -```julia -using CUDAnative, CuArrays - -mutable struct BeelerReuterGpu <: Function - t::Float64 # the last timestep time to calculate Δt - diff_coef::Float64 # the diffusion-coefficient (coupling strength) - - d_C::CuArray{Float32, 2} # intracellular calcium concentration - d_M::CuArray{Float32, 2} # sodium current activation gate (m) - d_H::CuArray{Float32, 2} # sodium current inactivation gate (h) - d_J::CuArray{Float32, 2} # sodium current slow inactivaiton gate (j) - d_D::CuArray{Float32, 2} # calcium current activaiton gate (d) - d_F::CuArray{Float32, 2} # calcium current inactivation gate (f) - d_XI::CuArray{Float32, 2} # inward-rectifying potassium current (iK1) - - d_u::CuArray{Float64, 2} # place-holder for u in the device memory - d_du::CuArray{Float64, 2} # place-holder for d_u in the device memory - - Δv::Array{Float64, 2} # place-holder for voltage gradient - - function BeelerReuterGpu(u0, diff_coef) - self = new() - - ny, nx = size(u0) - @assert (nx % 16 == 0) && (ny % 16 == 0) - self.t = 0.0 - self.diff_coef = diff_coef - - self.d_C = CuArray(fill(0.0001f0, (ny,nx))) - self.d_M = CuArray(fill(0.01f0, (ny,nx))) - self.d_H = CuArray(fill(0.988f0, (ny,nx))) - self.d_J = CuArray(fill(0.975f0, (ny,nx))) - self.d_D = CuArray(fill(0.003f0, (ny,nx))) - self.d_F = CuArray(fill(0.994f0, (ny,nx))) - self.d_XI = CuArray(fill(0.0001f0, (ny,nx))) - - self.d_u = CuArray(u0) - self.d_du = CuArray(zeros(ny,nx)) - - self.Δv = zeros(ny,nx) - - return self - end -end -``` - -The Laplacian function remains unchanged. The main change to the explicit gating solvers is that *exp* and *expm1* functions are prefixed by *CUDAnative.*. This is a technical nuisance that will hopefully be resolved in future. 
- -```julia -function rush_larsen_gpu(g, α, β, Δt) - inf = α/(α+β) - τ = 1.0/(α+β) - return clamp(g + (g - inf) * CUDAnative.expm1(-Δt/τ), 0f0, 1f0) -end - -function update_M_gpu(g, v, Δt) - # the condition is needed here to prevent NaN when v == 47.0 - α = isapprox(v, 47.0f0) ? 10.0f0 : -(v+47.0f0) / (CUDAnative.exp(-0.1f0*(v+47.0f0)) - 1.0f0) - β = (40.0f0 * CUDAnative.exp(-0.056f0*(v+72.0f0))) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_H_gpu(g, v, Δt) - α = 0.126f0 * CUDAnative.exp(-0.25f0*(v+77.0f0)) - β = 1.7f0 / (CUDAnative.exp(-0.082f0*(v+22.5f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_J_gpu(g, v, Δt) - α = (0.55f0 * CUDAnative.exp(-0.25f0*(v+78.0f0))) / (CUDAnative.exp(-0.2f0*(v+78.0f0)) + 1.0f0) - β = 0.3f0 / (CUDAnative.exp(-0.1f0*(v+32.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_D_gpu(g, v, Δt) - α = γ * (0.095f0 * CUDAnative.exp(-0.01f0*(v-5.0f0))) / (CUDAnative.exp(-0.072f0*(v-5.0f0)) + 1.0f0) - β = γ * (0.07f0 * CUDAnative.exp(-0.017f0*(v+44.0f0))) / (CUDAnative.exp(0.05f0*(v+44.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_F_gpu(g, v, Δt) - α = γ * (0.012f0 * CUDAnative.exp(-0.008f0*(v+28.0f0))) / (CUDAnative.exp(0.15f0*(v+28.0f0)) + 1.0f0) - β = γ * (0.0065f0 * CUDAnative.exp(-0.02f0*(v+30.0f0))) / (CUDAnative.exp(-0.2f0*(v+30.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_XI_gpu(g, v, Δt) - α = (0.0005f0 * CUDAnative.exp(0.083f0*(v+50.0f0))) / (CUDAnative.exp(0.057f0*(v+50.0f0)) + 1.0f0) - β = (0.0013f0 * CUDAnative.exp(-0.06f0*(v+20.0f0))) / (CUDAnative.exp(-0.04f0*(v+20.0f0)) + 1.0f0) - return rush_larsen_gpu(g, α, β, Δt) -end - -function update_C_gpu(c, d, f, v, Δt) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) - kCa = C_s * g_s * d * f - iCa = kCa * (v - ECa) - inf = 1.0f-7 * (0.07f0 - c) - τ = 1f0 / 0.07f0 - return c + (c - inf) * CUDAnative.expm1(-Δt/τ) -end -``` - -Similarly, we modify the 
functions to calculate the individual currents by adding CUDAnative prefix. - -```julia -# iK1 is the inward-rectifying potassium current -function calc_iK1(v) - ea = CUDAnative.exp(0.04f0*(v+85f0)) - eb = CUDAnative.exp(0.08f0*(v+53f0)) - ec = CUDAnative.exp(0.04f0*(v+53f0)) - ed = CUDAnative.exp(-0.04f0*(v+23f0)) - return 0.35f0 * (4f0*(ea-1f0)/(eb + ec) - + 0.2f0 * (isapprox(v, -23f0) ? 25f0 : (v+23f0) / (1f0-ed))) -end - -# ix1 is the time-independent background potassium current -function calc_ix1(v, xi) - ea = CUDAnative.exp(0.04f0*(v+77f0)) - eb = CUDAnative.exp(0.04f0*(v+35f0)) - return xi * 0.8f0 * (ea-1f0) / eb -end - -# iNa is the sodium current (similar to the classic Hodgkin-Huxley model) -function calc_iNa(v, m, h, j) - return C_Na * (g_Na * m^3 * h * j + g_NaC) * (v - ENa) -end - -# iCa is the calcium current -function calc_iCa(v, d, f, c) - ECa = D_Ca - 82.3f0 - 13.0278f0 * CUDAnative.log(c) # ECa is the calcium reversal potential - return C_s * g_s * d * f * (v - ECa) -end -``` - -### CUDA Kernels - -A CUDA program does not directly deal with GPCs and SMs. The logical view of a CUDA program is in the term of *blocks* and *threads*. We have to specify the number of block and threads when running a CUDA *kernel*. Each thread runs on a single CUDA core. Threads are logically bundled into blocks, which are in turn specified on a grid. The grid stands for the entirety of the domain of interest. - -Each thread can find its logical coordinate by using few pre-defined indexing variables (*threadIdx*, *blockIdx*, *blockDim* and *gridDim*) in C/C++ and the corresponding functions (e.g., `threadIdx()`) in Julia. There variables and functions are defined automatically for each thread and may return a different value depending on the calling thread. 
The return value of these functions is a 1, 2, or 3 dimensional structure whose elements can be accessed as `.x`, `.y`, and `.z` (for a 1-dimensional case, `.x` reports the actual index and `.y` and `.z` simply return 1). For example, if we deploy a kernel in 128 blocks and with 256 threads per block, each thread will see - -``` - gridDim.x = 128; - blockDim=256; -``` - -while `blockIdx.x` ranges from 0 to 127 in C/C++ and 1 to 128 in Julia. Similarly, `threadIdx.x` will be between 0 to 255 in C/C++ (of course, in Julia the range will be 1 to 256). - -A C/C++ thread can calculate its index as - -``` - int idx = blockDim.x * blockIdx.x + threadIdx.x; -``` - -In Julia, we have to take into account base 1. Therefore, we use the following formula - -``` - idx = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x -``` - -A CUDA programmer is free to interpret the calculated index however it fits the application, but in practice, it is usually interpreted as an index into input tensors. - -In the GPU version of the solver, each thread works on a single element of the medium, indexed by a (x,y) pair. -`update_gates_gpu` and `update_du_gpu` are very similar to their CPU counterparts but are in fact CUDA kernels where the *for* loops are replaced with CUDA specific indexing. Note that CUDA kernels cannot return a valve; hence, *nothing* at the end. 
- -```julia -function update_gates_gpu(u, XI, M, H, J, D, F, C, Δt) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - let Δt = Float32(Δt) - XI[i,j] = update_XI_gpu(XI[i,j], v, Δt) - M[i,j] = update_M_gpu(M[i,j], v, Δt) - H[i,j] = update_H_gpu(H[i,j], v, Δt) - J[i,j] = update_J_gpu(J[i,j], v, Δt) - D[i,j] = update_D_gpu(D[i,j], v, Δt) - F[i,j] = update_F_gpu(F[i,j], v, Δt) - - C[i,j] = update_C_gpu(C[i,j], D[i,j], F[i,j], v, Δt) - end - nothing -end - -function update_du_gpu(du, u, XI, M, H, J, D, F, C) - i = (blockIdx().x-UInt32(1)) * blockDim().x + threadIdx().x - j = (blockIdx().y-UInt32(1)) * blockDim().y + threadIdx().y - - v = Float32(u[i,j]) - - # calculating individual currents - iK1 = calc_iK1(v) - ix1 = calc_ix1(v, XI[i,j]) - iNa = calc_iNa(v, M[i,j], H[i,j], J[i,j]) - iCa = calc_iCa(v, D[i,j], F[i,j], C[i,j]) - - # total current - I_sum = iK1 + ix1 + iNa + iCa - - # the reaction part of the reaction-diffusion equation - du[i,j] = -I_sum / C_m - nothing -end -``` - -### Implicit Solver - -Finally, the deriv function is modified to copy *u* to GPU and copy *du* back and to invoke CUDA kernels. - -```julia -function (f::BeelerReuterGpu)(du, u, p, t) - L = 16 # block size - Δt = t - f.t - copyto!(f.d_u, u) - ny, nx = size(u) - - if Δt != 0 || t == 0 - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_gates_gpu( - f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C, Δt) - f.t = t - end - - laplacian(f.Δv, u) - - # calculate the reaction portion - @cuda blocks=(ny÷L,nx÷L) threads=(L,L) update_du_gpu( - f.d_du, f.d_u, f.d_XI, f.d_M, f.d_H, f.d_J, f.d_D, f.d_F, f.d_C) - - copyto!(du, f.d_du) - - # ...add the diffusion portion - du .+= f.diff_coef .* f.Δv -end -``` - -Ready to test! 
- -```julia -using DifferentialEquations, Sundials - -deriv_gpu = BeelerReuterGpu(u0, 1.0); -prob = ODEProblem(deriv_gpu, u0, (0.0, 50.0)); -@time sol = solve(prob, CVODE_BDF(linear_solver=:GMRES), saveat=100.0); -``` - -```julia -heatmap(sol.u[end]) -``` - -## Summary - -We achieve around a 6x speedup with running the explicit portion of our IMEX solver on a GPU. The major bottleneck of this technique is the communication between CPU and GPU. In its current form, not all of the internals of the method utilize GPU acceleration. In particular, the implicit equations solved by GMRES are performed on the CPU. This partial CPU nature also increases the amount of data transfer that is required between the GPU and CPU (performed every f call). Compiling the full ODE solver to the GPU would solve both of these issues and potentially give a much larger speedup. [JuliaDiffEq developers are currently working on solutions to alleviate these issues](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/), but these will only be compatible with native Julia solvers (and not Sundials). - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/advanced/02-advanced_ODE_solving.jmd b/tutorials/advanced/02-advanced_ODE_solving.jmd deleted file mode 100644 index 4e7a44c7..00000000 --- a/tutorials/advanced/02-advanced_ODE_solving.jmd +++ /dev/null @@ -1,511 +0,0 @@ ---- -title: Solving Stiff Equations -author: Chris Rackauckas ---- - -This tutorial is for getting into the extra features for solving stiff ordinary -differential equations in an efficient manner. Solving stiff ordinary -differential equations requires specializing the linear solver on properties of -the Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2) -back-solves. Note that these same functions and controls also extend to stiff -SDEs, DDEs, DAEs, etc. 
- -## Code Optimization for Differential Equations - -### Writing Efficient Code - -For a detailed tutorial on how to optimize one's DifferentialEquations.jl code, -please see the -[Optimizing DiffEq Code tutorial](http://tutorials.sciml.ai/html/introduction/03-optimizing_diffeq_code.html). - -### Choosing a Good Solver - -Choosing a good solver is required for getting top notch speed. General -recommendations can be found on the solver page (for example, the -[ODE Solver Recommendations](https://docs.sciml.ai/dev/solvers/ode_solve)). -The current recommendations can be simplified to a Rosenbrock method -(`Rosenbrock23` or `Rodas5`) for smaller (<50 ODEs) problems, ESDIRK methods -for slightly larger (`TRBDF2` or `KenCarp4` for <2000 ODEs), and Sundials -`CVODE_BDF` for even larger problems. `lsoda` from -[LSODA.jl](https://github.com/rveltz/LSODA.jl) is generally worth a try. - -More details on the solver to choose can be found by benchmarking. See the -[DiffEqBenchmarks](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) to -compare many solvers on many problems. - -### Check Out the Speed FAQ - -See [this FAQ](https://docs.sciml.ai/latest/basics/faq/#faq_performance-1) -for information on common pitfalls and how to improve performance. - -### Setting Up Your Julia Installation for Speed - -Julia uses an underlying BLAS implementation for its matrix multiplications -and factorizations. This library is automatically multithreaded and accelerates -the internal linear algebra of DifferentialEquations.jl. However, for optimality, -you should make sure that the number of BLAS threads that you are using matches -the number of physical cores and not the number of logical cores. See -[this issue for more details](https://github.com/JuliaLang/julia/issues/33409). 
- -To check the number of BLAS threads, use: - -```julia -ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ()) -``` - -If I want to set this directly to 4 threads, I would use: - -```julia -using LinearAlgebra -LinearAlgebra.BLAS.set_num_threads(4) -``` - -Additionally, in some cases Intel's MKL might be a faster BLAS than the standard -BLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you -can use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) which will accelerate -the linear algebra routines. Please see the package for the limitations. - -### Use Accelerator Hardware - -When possible, use GPUs. If your ODE system is small and you need to solve it -with very many different parameters, see the -[ensembles interface](https://docs.sciml.ai/dev/features/ensemble) -and [DiffEqGPU.jl](https://github.com/JuliaDiffEq/DiffEqGPU.jl). If your problem -is large, consider using a [CuArray](https://github.com/JuliaGPU/CuArrays.jl) -for the state to allow for GPU-parallelism of the internal linear algebra. - -## Speeding Up Jacobian Calculations - -When one is using an implicit or semi-implicit differential equation solver, -the Jacobian must be built at many iterations and this can be one of the most -expensive steps. There are two pieces that must be optimized in order to reach -maximal efficiency when solving stiff equations: the sparsity pattern and the -construction of the Jacobian. The construction is filling the matrix -`J` with values, while the sparsity pattern is what `J` to use. - -The sparsity pattern is given by a prototype matrix, the `jac_prototype`, which -will be copied to be used as `J`. The default is for `J` to be a `Matrix`, -i.e. a dense matrix. However, if you know the sparsity of your problem, then -you can pass a different matrix type. For example, a `SparseMatrixCSC` will -give a sparse matrix. 
Additionally, structured matrix types like `Tridiagonal`, -`BandedMatrix` (from -[BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)), -`BlockBandedMatrix` (from -[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)), -and more can be given. DifferentialEquations.jl will internally use this matrix -type, making the factorizations faster by utilizing the specialized forms. - -For the construction, there are 3 ways to fill `J`: - -- The default, which uses normal finite/automatic differentiation -- A function `jac(J,u,p,t)` which directly computes the values of `J` -- A `colorvec` which defines a sparse differentiation scheme. - -We will now showcase how to make use of this functionality with growing complexity. - -### Declaring Jacobian Functions - -Let's solve the Rosenbrock equations: - -$$\begin{align} -dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\ -dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ -dy_3 &= 3*10^7 y_{3}^2 \\ -\end{align}$$ - -In order to reduce the Jacobian construction cost, one can describe a Jacobian -function by using the `jac` argument for the `ODEFunction`. First, let's do -a standard `ODEProblem`: - -```julia -using DifferentialEquations -function rober(du,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - du[1] = -k₁*y₁+k₃*y₂*y₃ - du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ - du[3] = k₂*y₂^2 - nothing -end -prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -sol = solve(prob,Rosenbrock23()) - -using Plots -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -```julia -using BenchmarkTools -@btime solve(prob) -``` - -Now we want to add the Jacobian. First we have to derive the Jacobian -$\frac{df_i}{du_j}$ which is `J[i,j]`. 
From this we get: - -```julia -function rober_jac(J,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - J[1,1] = k₁ * -1 - J[2,1] = k₁ - J[3,1] = 0 - J[1,2] = y₃ * k₃ - J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1 - J[3,2] = y₂ * 2 * k₂ - J[1,3] = k₃ * y₂ - J[2,3] = k₃ * y₂ * -1 - J[3,3] = 0 - nothing -end -f = ODEFunction(rober, jac=rober_jac) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) - -@btime solve(prob_jac) -``` - -### Automatic Derivation of Jacobian Functions - -But that was hard! If you want to take the symbolic Jacobian of numerical -code, we can make use of [ModelingToolkit.jl](https://github.com/JuliaDiffEq/ModelingToolkit.jl) -to symbolicify the numerical code and do the symbolic calculation and return -the Julia code for this. - -```julia -using ModelingToolkit -de = modelingtoolkitize(prob) -ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place -``` - -which outputs: - -```julia;eval=false -:((##MTIIPVar#376, u, p, t)->begin - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =# - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =# - let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3]) - ##MTIIPVar#376[1] = α₁ * -1 - ##MTIIPVar#376[2] = α₁ - ##MTIIPVar#376[3] = 0 - ##MTIIPVar#376[4] = x₃ * α₃ - ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1 - ##MTIIPVar#376[6] = x₂ * 2 * α₂ - ##MTIIPVar#376[7] = α₃ * x₂ - ##MTIIPVar#376[8] = α₃ * x₂ * -1 - ##MTIIPVar#376[9] = 0 - end - #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =# - nothing - end) -``` - -Now let's use that to give the analytical solution Jacobian: - -```julia -jac = eval(ModelingToolkit.generate_jacobian(de...)[2]) -f = ODEFunction(rober, jac=jac) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -``` - -### Declaring a Sparse Jacobian - -Jacobian sparsity is declared by the `jac_prototype` argument in the `ODEFunction`. 
-Note that you should only do this if the sparsity is high, for example, 0.1% -of the matrix is non-zeros, otherwise the overhead of sparse matrices can be higher -than the gains from sparse differentiation! - -But as a demonstration, let's build a sparse matrix for the Rober problem. We -can do this by gathering the `I` and `J` pairs for the non-zero components, like: - -```julia -I = [1,2,1,2,3,1,2] -J = [1,1,2,2,2,3,3] -using SparseArrays -jac_prototype = sparse(I,J,1.0) -``` - -Now this is the sparse matrix prototype that we want to use in our solver, which -we then pass like: - -```julia -f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype) -prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -``` - -### Automatic Sparsity Detection - -One of the useful companion tools for DifferentialEquations.jl is -[SparsityDetection.jl](https://github.com/JuliaDiffEq/SparsityDetection.jl). -This allows for automatic declaration of Jacobian sparsity types. To see this -in action, let's look at the 2-dimensional Brusselator equation: - -```julia -const N = 32 -const xyd_brusselator = range(0,stop=1,length=N) -brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5. -limit(a, N) = a == N+1 ? 1 : a == 0 ? 
N : a -function brusselator_2d_loop(du, u, p, t) - A, B, alpha, dx = p - alpha = alpha/dx^2 - @inbounds for I in CartesianIndices((N, N)) - i, j = Tuple(I) - x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] - ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N) - du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) + - B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) - du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) + - A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] - end -end -p = (3.4, 1., 10., step(xyd_brusselator)) -``` - -Given this setup, we can give and example `input` and `output` and call `sparsity!` -on our function with the example arguments and it will kick out a sparse matrix -with our pattern, that we can turn into our `jac_prototype`. - -```julia -using SparsityDetection, SparseArrays -input = rand(32,32,2) -output = similar(input) -sparsity_pattern = jacobian_sparsity(brusselator_2d_loop,output,input,p,0.0) -jac_sparsity = Float64.(sparse(sparsity_pattern)) -``` - -Let's double check what our sparsity pattern looks like: - -```julia -using Plots -spy(jac_sparsity,markersize=1,colorbar=false,color=:deep) -``` - -That's neat, and would be tedius to build by hand! 
Now we just pass it to the -`ODEFunction` like as before: - -```julia -f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity) -``` - -Build the `ODEProblem`: - -```julia -function init_brusselator_2d(xyd) - N = length(xyd) - u = zeros(N, N, 2) - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - u[I,1] = 22*(y*(1-y))^(3/2) - u[I,2] = 27*(x*(1-x))^(3/2) - end - u -end -u0 = init_brusselator_2d(xyd_brusselator) -prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop, - u0,(0.,11.5),p) - -prob_ode_brusselator_2d_sparse = ODEProblem(f, - u0,(0.,11.5),p) -``` - -Now let's see how the version with sparsity compares to the version without: - -```julia -@btime solve(prob_ode_brusselator_2d,save_everystep=false) -@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) -``` - -### Declaring Color Vectors for Fast Construction - -If you cannot directly define a Jacobian function, you can use the `colorvec` -to speed up the Jacobian construction. What the `colorvec` does is allows for -calculating multiple columns of a Jacobian simultaniously by using the sparsity -pattern. An explanation of matrix coloring can be found in the -[MIT 18.337 Lecture Notes](https://mitmath.github.io/18337/lecture9/stiff_odes). - -To perform general matrix coloring, we can use -[SparseDiffTools.jl](https://github.com/JuliaDiffEq/SparseDiffTools.jl). For -example, for the Brusselator equation: - -```julia -using SparseDiffTools -colorvec = matrix_colors(jac_sparsity) -@show maximum(colorvec) -``` - -This means that we can now calculate the Jacobian in 12 function calls. This is -a nice reduction from 2048 using only automated tooling! 
To now make use of this -inside of the ODE solver, you simply need to declare the colorvec: - -```julia -f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity, - colorvec=colorvec) -prob_ode_brusselator_2d_sparse = ODEProblem(f, - init_brusselator_2d(xyd_brusselator), - (0.,11.5),p) -@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) -``` - -Notice the massive speed enhancement! - -## Defining Linear Solver Routines and Jacobian-Free Newton-Krylov - -A completely different way to optimize the linear solvers for large sparse -matrices is to use a Krylov subpsace method. This requires choosing a linear -solver for changing to a Krylov method. Optionally, one can use a Jacobian-free -operator to reduce the memory requirements. - -### Declaring a Jacobian-Free Newton-Krylov Implementation - -To swap the linear solver out, we use the `linsolve` command and choose the -GMRES linear solver. - -```julia -@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) -@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) -``` - -For more information on linear solver choices, see the -[linear solver documentation](https://docs.sciml.ai/dev/features/linear_nonlinear). - -On this problem, handling the sparsity correctly seemed to give much more of a -speedup than going to a Krylov approach, but that can be dependent on the problem -(and whether a good preconditioner is found). - -We can also enhance this by using a Jacobian-Free implementation of `f'(x)*v`. -To define the Jacobian-Free operator, we can use -[DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to generate -an operator `JacVecOperator` such that `Jv*v` performs `f'(x)*v` without building -the Jacobian matrix. 
- -```julia -using DiffEqOperators -Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0) -``` - -and then we can use this by making it our `jac_prototype`: - -```julia -f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv) -prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p) -@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) -``` - -### Adding a Preconditioner - -The [linear solver documentation](https://docs.sciml.ai/latest/features/linear_nonlinear/#iterativesolvers-jl-1) -shows how you can add a preconditioner to the GMRES. For example, you can -use packages like [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) -to add an algebraic multigrid (AMG) or [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl) -for an incomplete LU-factorization (iLU). - -```julia -using AlgebraicMultigrid -pc = aspreconditioner(ruge_stuben(jac_sparsity)) -@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false) -``` - -## Using Structured Matrix Types - -If your sparsity pattern follows a specific structure, for example a banded -matrix, then you can declare `jac_prototype` to be of that structure and then -additional optimizations will come for free. Note that in this case, it is -not necessary to provide a `colorvec` since the color vector will be analytically -derived from the structure of the matrix. - -The matrices which are allowed are those which satisfy the -[ArrayInterface.jl](https://github.com/JuliaDiffEq/ArrayInterface.jl) interface -for automatically-colorable matrices. 
These include: - -- Bidiagonal -- Tridiagonal -- SymTridiagonal -- BandedMatrix ([BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)) -- BlockBandedMatrix ([BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)) - -Matrices which do not satisfy this interface can still be used, but the matrix -coloring will not be automatic, and an appropriate linear solver may need to -be given (otherwise it will default to attempting an LU-decomposition). - -## Sundials-Specific Handling - -While much of the setup makes the transition to using Sundials automatic, there -are some differences between the pure Julia implementations and the Sundials -implementations which must be taken note of. These are all detailed in the -[Sundials solver documentation](https://docs.sciml.ai/latest/solvers/ode_solve/#ode_solve_sundials-1), -but here we will highlight the main details which one should make note of. - -Defining a sparse matrix and a Jacobian for Sundials works just like any other -package. The core difference is in the choice of the linear solver. With Sundials, -the linear solver choice is done with a Symbol in the `linear_solver` from a -preset list. Particular choices of note are `:Band` for a banded matrix and -`:GMRES` for using GMRES. If you are using Sundials, `:GMRES` will not require -defining the JacVecOperator, and instead will always make use of a Jacobian-Free -Newton Krylov (with numerical differentiation). Thus on this problem we could do: - -```julia -using Sundials -# Sparse Version -@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false) -# GMRES Version: Doesn't require any extra stuff! -@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) -``` - -Details for setting up a preconditioner with Sundials can be found at the -[Sundials solver page](https://docs.sciml.ai/latest/solvers/ode_solve/#ode_solve_sundials-1). 
- -## Handling Mass Matrices - -Instead of just defining an ODE as $u' = f(u,p,t)$, it can be common to express -the differential equation in the form with a mass matrix: - -$$Mu' = f(u,p,t)$$ - -where $M$ is known as the mass matrix. Let's solve the Robertson equation. -At the top we wrote this equation as: - -$$\begin{align} -dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\ -dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ -dy_3 &= 3*10^7 y_{3}^2 \\ -\end{align}$$ - -But we can instead write this with a conservation relation: - -$$\begin{align} -dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\ -dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ -1 &= y_{1} + y_{2} + y_{3} \\ -\end{align}$$ - -In this form, we can write this as a mass matrix ODE where $M$ is singular -(this is another form of a differential-algebraic equation (DAE)). Here, the -last row of `M` is just zero. We can implement this form as: - -```julia -using DifferentialEquations -function rober(du,u,p,t) - y₁,y₂,y₃ = u - k₁,k₂,k₃ = p - du[1] = -k₁*y₁+k₃*y₂*y₃ - du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ - du[3] = y₁ + y₂ + y₃ - 1 - nothing -end -M = [1. 0 0 - 0 1. 0 - 0 0 0] -f = ODEFunction(rober,mass_matrix=M) -prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) -sol = solve(prob_mm,Rodas5()) - -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -Note that if your mass matrix is singular, i.e. 
your system is a DAE, then you -need to make sure you choose -[a solver that is compatible with DAEs](https://docs.sciml.ai/latest/solvers/dae_solve/#dae_solve_full-1) - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/advanced/03-kolmogorov_equations.jmd b/tutorials/advanced/03-kolmogorov_equations.jmd deleted file mode 100644 index aefe1ed5..00000000 --- a/tutorials/advanced/03-kolmogorov_equations.jmd +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: Kolmogorov Backward Equations -author: Ashutosh Bharambe ---- - -```julia -using Flux, StochasticDiffEq -using NeuralNetDiffEq -using Plots -using CuArrays -using CUDAnative -``` -## Introduction on Backward Kolmogorov Equations - -The backward Kolmogorov Equation deals with a terminal condtion. -The one dimensional backward kolmogorov equation that we are going to deal with is of the form : - -$$ - \frac{\partial p}{\partial t} = -\mu(x)\frac{\partial p}{\partial x} - \frac{1}{2}{\sigma^2}(x)\frac{\partial^2 p}{\partial x^2} ,\hspace{0.5cm} p(T , x) = \varphi(x) -$$ -for all $ t \in{ [0 , T] } $ and for all $ x \in R^d $ - -#### The Black Scholes Model - -The Black-Scholes Model governs the price evolution of the European put or call option. In the below equation V is the price of some derivative , S is the Stock Price , r is the risk free interest -rate and σ the volatility of the stock returns. The payoff at a time T is known to us. And this makes it a terminal PDE. 
In case of an European put option the PDE is: -$$ - \frac{\partial V}{\partial t} + rS\frac{\partial V}{\partial S} + \frac{1}{2}{\sigma^2}{S^2}\frac{\partial^2 V}{\partial S^2} -rV = 0 ,\hspace{0.5cm} V(T , S) = max\{\mathcal{K} - S , 0 \} -$$ -for all $ t \in{ [0 , T] } $ and for all $ S \in R^d $ - -In order to make the above equation in the form of the Backward - Kolmogorov PDE we should substitute - -$$ - V(S , t) = e^{r(t-T)}p(S , t) -$$ -and thus we get -$$ - e^{r(t-T)}\frac{\partial p}{\partial t} + re^{r(t-T)}p(S , t) = -\mu(x)\frac{\partial p}{\partial x}e^{r(t-T)} - \frac{1}{2}{\sigma^2}(x)\frac{\partial^2 p}{\partial x^2}e^{r(t-T)} - + re^{r(t-T)}p(S , t) -$$ -And the terminal condition -$$ - p(S , T) = max\{ \mathcal{K} - x , 0 \} -$$ -We will train our model and the model itself will be the solution of the equation -## Defining the problem and the solver -We should start defining the terminal condition for our equation: -```julia -function phi(xi) - y = Float64[] - K = 100 - for x in eachcol(xi) - val = max(K - maximum(x) , 0.00) - y = push!(y , val) - end - y = reshape(y , 1 , size(y)[1] ) - return y -end -``` -Now we shall define the problem : -We will define the σ and μ by comparing it to the orignal equation. The xspan is the span of initial stock prices. -```julia -d = 1 -r = 0.04 -sigma = 0.2 -xspan = (80.00 , 115.0) -tspan = (0.0 , 1.0) -σ(du , u , p , t) = du .= sigma.*u -μ(du , u , p , t) = du .= r.*u -prob = KolmogorovPDEProblem(μ , σ , phi , xspan , tspan, d) -``` -Now once we have defined our problem it is necessary to define the parameters for the solver. 
-```julia -sdealg = EM() -ensemblealg = EnsembleThreads() -dt = 0.01 -dx = 0.01 -trajectories = 100000 -``` - -Now lets define our model m and the optimiser -```julia -m = Chain(Dense(d, 64, elu),Dense(64, 128, elu),Dense(128 , 16 , elu) , Dense(16 , 1)) -use_gpu = false -if CUDAnative.functional() == true - m = fmap(CuArrays.cu , m) - use_gpu = true -end -opt = Flux.ADAM(0.0005) -``` -And then finally call the solver -```julia -@time sol = solve(prob, NeuralNetDiffEq.NNKolmogorov(m, opt, sdealg, ensemblealg), verbose = true, dt = dt, - dx = dx , trajectories = trajectories , abstol=1e-6, maxiters = 1000 , use_gpu = use_gpu) -``` -## Analyzing the solution -Now let us find a Monte-Carlo Solution and plot the both: -```julia -monte_carlo_sol = [] -x_out = collect(85:2.00:110.00) -for x in x_out - u₀= [x] - g_val(du , u , p , t) = du .= 0.2.*u - f_val(du , u , p , t) = du .= 0.04.*u - dt = 0.01 - tspan = (0.0,1.0) - prob = SDEProblem(f_val,g_val,u₀,tspan) - output_func(sol,i) = (sol[end],false) - ensembleprob_val = EnsembleProblem(prob , output_func = output_func ) - sim_val = solve(ensembleprob_val, EM(), EnsembleThreads() , dt=0.01, trajectories=100000,adaptive=false) - s = reduce(hcat , sim_val.u) - mean_phi = sum(phi(s))/length(phi(s)) - global monte_carlo_sol = push!(monte_carlo_sol , mean_phi) -end - -``` - -##Plotting the Solutions -We should reshape the inputs and outputs to make it compatible with our model. This is the most important part. The algorithm gives a distributed function over all initial prices in the xspan. 
-```julia -x_model = reshape(x_out, 1 , size(x_out)[1]) -if use_gpu == true - m = fmap(cpu , m) -end -y_out = m(x_model) -y_out = reshape(y_out , 13 , 1) -``` -And now finally we can plot the solutions -```julia -plot(x_out , y_out , lw = 3 , xaxis="Initial Stock Price", yaxis="Payoff" , label = "NNKolmogorov") -plot!(x_out , monte_carlo_sol , lw = 3 , xaxis="Initial Stock Price", yaxis="Payoff" ,label = "Monte Carlo Solutions") -``` diff --git a/tutorials/advanced/Project.toml b/tutorials/advanced/Project.toml deleted file mode 100644 index 24dde183..00000000 --- a/tutorials/advanced/Project.toml +++ /dev/null @@ -1,38 +0,0 @@ -[deps] -AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -CUDAnative = "be33ccc6-a3ff-5ff2-a52e-74243cff1e17" -CuArrays = "3a865a2d-5b23-5a0f-bc46-62713ec82fae" -DiffEqOperators = "9fdde737-9c7f-55bf-ade8-46b3f136cc48" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" -NeuralNetDiffEq = "8faf48c0-8b73-11e9-0e63-2155955bfa4d" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -SparseDiffTools = "47a9eef4-7e08-11e9-0b38-333d64bd3804" -SparsityDetection = "684fba80-ace3-11e9-3d08-3bc7ed6f96df" -StochasticDiffEq = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" -Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -AlgebraicMultigrid = "0.3, 0.4" -BenchmarkTools = "0.5, 0.6, 0.7, 1.0" -CUDAnative = "3.1" -CuArrays = "2.2" -DiffEqOperators = "4.10" -DifferentialEquations = "6.14" -Flux = "0.10, 0.11, 0.12" -ModelingToolkit = "3.10, 4.0, 5.0" -NLsolve = "4.4" -NeuralNetDiffEq = "1.6" 
-OrdinaryDiffEq = "5.41" -Plots = "1.4" -SparseDiffTools = "1.9" -SparsityDetection = "0.3" -StochasticDiffEq = "6.23" -Sundials = "4.2" diff --git a/tutorials/exercises/01-workshop_exercises.jmd b/tutorials/exercises/01-workshop_exercises.jmd deleted file mode 100644 index 4f954dd4..00000000 --- a/tutorials/exercises/01-workshop_exercises.jmd +++ /dev/null @@ -1,682 +0,0 @@ ---- -title: SciML Workshop Exercises -author: Chris Rackauckas ---- - -These exercises teach common workflows which involve SciML's tools like -DifferentialEquations.jl, DiffEqFlux.jl, and the connections to parts like -stochastic differential equations and Bayesian estimation. -The designation (B) is for "Beginner", meaning that a user new to the package -should feel comfortable trying this exercise. An exercise designated (I) is -for "Intermediate", meaning the user may want to have some previous background -in DifferentialEquations.jl or try some (B) exercises first. The additional -(E) designation is for "Experienced", which are portions of exercises which may -take some work. - -The exercises are described as follows: - -- Exercise 1 takes the user through solving a stiff ordinary differential equation - and using the ModelingToolkit.jl to automatically convert the function to a - symbolic form to derive the analytical Jacobian to speed up the solver. The - same biological system is then solved with stochasticity, utilizing - EnsembleProblems to understand 95% bounds on the solution. Finally, - probabilistic programming is employed to perform Bayesian parameter estimation - of the parameters against data. -- Exercise 2 takes the user through defining hybrid delay differential equation, - that is a differential equation with events, and using differentiable programming - techniques (automatic differentiation) to to perform gradient-based parameter - estimation. 
-- Exercise 3 takes the user through differential-algebraic equation (DAE) - modeling, the concept of index, and using both mass-matrix and implicit - ODE representations. This will require doing a bit of math, but the student - will understand how to change their equations to make their DAE numerically - easier for the integrators. -- Exercise 4 takes the user through optimizing a PDE solver, utilizing - automatic sparsity pattern recognition, automatic conversion of numerical - codes to symbolic codes for analytical construction of the Jacobian, - preconditioned GMRES, and setting up a solver for IMEX and GPUs, and compute - adjoints of PDEs. -- Exercise 5 focuses on a chaotic orbit, utilizing parallel ensembles across - supercomputers and GPUs to quickly describe phase space. -- Exercise 6 takes the user through training a neural stochastic differential - equation, using GPU-accleration and adjoints through Flux.jl's neural - network framework to build efficient training codes. - -This exercise worksheet is meant to be a living document leading new users through -a deep dive of the DifferentialEquations.jl feature set. If you further suggestions -or want to contribute new problems, please open an issue or PR at the -SciMLTutorials.jl repository. - -# Problem 1: Investigating Sources of Randomness and Uncertainty in a Stiff Biological System (B) - -In this problem we will walk through the basics of simulating models with -DifferentialEquations.jl. Let's take the -[Oregonator model of the Belousov-Zhabotinskii chemical reaction system](https://www.radford.edu/~thompson/vodef90web/problems/demosnodislin/Demos_Pitagora/DemoOrego/demoorego.pdf). -This system describes a classical example in non-equilibrium thermodynmics -and is a well-known natural chemical oscillator. - -## Part 1: Simulating the Oregonator ODE model - -When modeling, usually one starts off by investigating the deterministic model. 
-The deterministic ODE formulation of the Oregonator is -given by the equations - -$$\begin{align} -\frac{dx}{dt} &= s(y-xy + x - qx^2)\\ -\frac{dy}{dt} &= (-y - xy + z)/s\\ -\frac{dz}{dt} &= w(x - z)\end{align}$$ - -with parameter values $s=77.27$, $w=0.161$, and $q=8.375 \times 10^{-6}$, and -initial conditions $x(0)=1$, $y(0)=2$, and $z(0)=3$. Use -[the tutorial on solving ODEs](https://docs.sciml.ai/dev/tutorials/ode_example) -to solve this differential equation on the -timespan of $t\in[0,360]$ with the default ODE solver. To investigate the result, -plot the solution of all components over time, and plot the phase space plot of -the solution (hint: use `vars=(1,2,3)`). What shape is being drawn in phase space? - -## Part 2: Investigating Stiffness - -Because the reaction rates of `q` vs `s` is very large, this model has a "fast" -system and a "slow" system. This is typical of ODEs which exhibit a property -known as stiffness. Stiffness changes the ODE solvers which can handle the -equation well. [Take a look at the ODE solver page](https://docs.sciml.ai/dev/solvers/ode_solve) -and investigate solving the equation using methods for non-stiff equations -(ex: `Tsit5`) and stiff equations (ex: `Rodas5`). - -Benchmark using $t\in[0,50]$ using `@btime` from BenchmarkTools.jl. What -happens when you increase the timespan? - -## (Optional) Part 3: Specifying Analytical Jacobians (I) - -Stiff ODE solvers internally utilize the Jacobian of the ODE system in order -to improve the stepsizes in the solution. However, computing and factorizing -the Jacobian is costly, and thus it can be beneficial to provide the analytical -solution. - -Use the -[ODEFunction definition page](https://docs.sciml.ai/dev/features/performance_overloads) -to define an `ODEFunction` which holds both the OREGO ODE and its Jacobian, and solve using `Rodas5`. - -## (Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations - -Deriving Jacobians by hand is tedious. 
Thankfully symbolic mathematical systems -can do the work for you. And thankfully, DifferentialEquations.jl has tools -to automatically convert numerical problems into symbolic problems to perform -the analysis on! - -follow the [ModelingToolkit.jl README](https://github.com/JuliaDiffEq/ModelingToolkit.jl) -to automatically convert your ODE definition -to its symbolic form using `modelingtoolkitize` and calculate the analytical -Jacobian. Use the compilation functions to build the `ODEFunction` with the -embedded analytical solution. - -## Part 5: Adding stochasticity with stochastic differential equations - -How does this system react in the presense of stochasticity? We can investigate -this question by using stochastic differential equations. A stochastic -differential equation formulation of this model is known as the multiplicative -noise model, is created with: - -$$\begin{align} -dx &= s(y-xy + x - qx^2)dt + \sigma_1 x dW_1\\ -dy &= \frac{-y - xy + z}{s}dt + \sigma_2 y dW_2\\ -dz &= w(x - z)dt + \sigma_3 z dW_3\end{align}$$ - -with $\sigma_i = 0.1$ where the `dW` terms describe a Brownian motion, a -continuous random process with normally distributed increments. Use the -[tutorial on solving SDEs](https://docs.sciml.ai/dev/tutorials/sde_example) -to solve simulate this model. Then, -[use the `EnsembleProblem`](https://docs.sciml.ai/dev/features/ensemble) -to generate and plot 100 trajectories of the stochastic model, and use -`EnsembleSummary` to plot the mean and 5%-95% region over time. - -Try solving with the `ImplicitRKMil` and `SOSRI` methods. Notice that it isn't -stiff every single time! - -(For fun, see if you can make the Euler-Maruyama `EM()` method solve this equation. -This requires a choice of `dt` small enough to be stable. This is the "standard" -method!) 
-
-## Part 6: Gillespie jump models of discrete stochasticity
-
-When biological models have very few particles, continuous models no longer
-make sense, and instead using the full discrete formulation can be required
-to accurately describe the dynamics. A discrete differential equation, or
-Gillespie model, is a continuous-time Markov chain with Poisson-distributed
-jumps. A discrete description of the Oregonator model is given by a chemical
-reaction system:
-
-```{julia;eval=false}
-A+Y -> X+P
-X+Y -> 2P
-A+X -> 2X + 2Z
-2X -> A + P (note: this has rate kX^2!)
-B + Z -> Y
-```
-
-where reactions take place at a rate which is proportional to its components,
-i.e. the first reaction has a rate `k*A*Y` for some `k`.
-Use the [tutorial on Gillespie SSA models](https://docs.sciml.ai/dev/tutorials/discrete_stochastic_example)
-to implement the `JumpProblem` for this model, and use the `EnsembleProblem`
-and `EnsembleSummary` to characterize the stochastic trajectories.
-
-For what rate constants does the model give the oscillatory dynamics for the
-ODE approximation? For information on the true reaction rates, consult
-[the original paper](https://pubs.acs.org/doi/abs/10.1021/ja00780a001).
-
-## Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I)
-
-In many cases, one comes to understand the proper values for their model's
-parameters by utilizing data fitting techniques. In this case, we will use
-the DiffEqBayes.jl library to perform a Bayesian estimation of the parameters.
-For our data we will the following potential output: - -```{julia;eval=false} -t = 0.0:1.0:30.0 -data = [1.0 2.05224 2.11422 2.1857 2.26827 2.3641 2.47618 2.60869 2.7677 2.96232 3.20711 3.52709 3.97005 4.64319 5.86202 9.29322 536.068 82388.9 57868.4 1.00399 1.00169 1.00117 1.00094 1.00082 1.00075 1.0007 1.00068 1.00066 1.00065 1.00065 1.00065 - 2.0 1.9494 1.89645 1.84227 1.78727 1.73178 1.67601 1.62008 1.56402 1.50772 1.45094 1.39322 1.33366 1.2705 1.19958 1.10651 0.57194 0.180316 0.431409 251.774 591.754 857.464 1062.78 1219.05 1335.56 1419.88 1478.22 1515.63 1536.25 1543.45 1539.98 - 3.0 2.82065 2.68703 2.58974 2.52405 2.48644 2.47449 2.48686 2.52337 2.58526 2.67563 2.80053 2.9713 3.21051 3.5712 4.23706 12.0266 14868.8 24987.8 23453.4 19202.2 15721.6 12872.0 10538.8 8628.66 7064.73 5784.29 4735.96 3877.66 3174.94 2599.6] -``` - -[Follow the exmaples on the parameter estimation page](https://docs.sciml.ai/latest/analysis/parameter_estimation/#Bayesian-Inference-Examples-1) -to perform a Bayesian parameter estimation. What are the most likely parameters -for the model given the posterior parameter distributions? - -Use the `ODEProblem` to perform the fit. If you have time, use the `EnsembleProblem` -of `SDEProblem`s to perform a fit over averages of the SDE solutions. Note that -the SDE fit will take significantly more computational resources! See the GPU -parallelism section for details on how to accelerate this. - -## (Optional) Part 8: Using DiffEqBiological's Reaction Network DSL - -DiffEqBiological.jl is a helper library for the DifferentialEquations.jl -ecosystem for defining chemical reaction systems at a high leevel for easy -simulation in these various forms. Use the descrption -[from the Chemical Reaction Networks documentation page](https://docs.sciml.ai/dev/models/biological) -to build a reaction network and generate the ODE/SDE/jump equations, and -compare the result to your handcoded versions. 
- -# Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B) - -Hybrid differential equations are differential equations with events, where -events are some interaction that occurs according to a prespecified condition. -For example, the bouncing ball is a classic hybrid differential equation given -by an ODE (Newton's Law of Gravity) mixed with the fact that, whenever the -ball hits the floor (`x=0`), then the velocity of the ball flips (`v=-v`). - -In addition, many models incorporate delays, that is the driving force of the -equation is dependent not on the current values, but values from the past. -These delay differential equations model how individuals in the economy act -on old information, or that biological processes take time to adapt to a new -environment. - -In this equation we will build a hybrid delayed pharmacokinetic model and -use the parameter estimation techniques to fit this it to a data. - -## Part 1: Defining an ODE with Predetermined Doses - -First, let's define the simplest hybrid ordinary differential equation: an ODE -where the events take place at fixed times. The ODE we will use is known as -the one-compartment model: - -$$\begin{align} -\frac{d[Depot]}{dt} &= -K_a [Depot] + R\\ -\frac{d[Central]}{dt} &= K_a [Depot] - K_e [Central]\end{align}$$ - -with $t \in [0,90]$, $u_0 = [100.0,0]$, and $p=[K_a,K_e]=[2.268,0.07398]$. - -With this model, use [the event handling documentation page](https://docs.sciml.ai/dev/features/callback_functions) -to define a `DiscreteCallback` which fires at `t ∈ [24,48,72]` and adds a -dose of 100 into `[Depot]`. (Hint: you'll want to set `tstops=[24,48,72]` to -force the ODE solver to step at these times). - -## Part 2: Adding Delays - -Now let's assume that instead of there being one compartment, there are many -transit compartment that the drug must move through in order to reach the -central compartment. 
This effectively delays the effect of the transition from -`[Depot]` to `[Central]`. To model this effect, we will use the delay -differential equation which utilizes a fixed time delay $\tau$: - -$$\begin{align} -\frac{d[Depot]}{dt} &= -K_a [Depot](t)\\ -\frac{d[Central]}{dt} &= K_a [Depot](t-\tau) - K_e [Central]\end{align}$$ - -where the parameter $τ = 6.0$. -[Use the DDE tutorial](https://docs.sciml.ai/dev/tutorials/dde_example) -to define and solve this delayed version of the hybrid model. - -## Part 3: Automatic Differentiation (AD) for Optimization (I) - -In order to fit parameters $(K_a,K_e,\tau)$ we will want to be able to calculate -the gradient of the solution with respect to the initial conditions. One way to -do this is via Automatic Differentition (AD). For small numbers of parameters -(<100), it is fastest to use Forward-Mode Automatic Differentition -(even faster than using adjoint sensitivity analysis!). Thus for this problem -we will make use of ForwardDiff.jl to use Dual number arithmetic to retrive -both the solution and its derivative w.r.t. parameters in a single solve. - -[Use the information from the page on local sensitvity analysis](https://docs.sciml.ai/dev/analysis/sensitivity) -to define the input dual numbers, solve the equation, and plot both the solution -over time and the derivative of the solution w.r.t. the parameters. - -## Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl - -Now let's fit the delayed model to a dataset. For the data, use the array - -```{julia;eval=false} -t = 0.0:12.0:90.0 -data = [100.0 0.246196 0.000597933 0.24547 0.000596251 0.245275 0.000595453 0.245511 - 0.0 53.7939 16.8784 58.7789 18.3777 59.1879 18.5003 59.2611] -``` - -Use [the parameter estimation page](https://docs.sciml.ai/dev/analysis/parameter_estimation) -to define a loss function with `build_loss_objective` and optimize the parameters -against the data. What parameters were used to generate the data? 
-
-## Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I)
-
-Now that we have fit our delay differential equation model to the dataset, we
-want to start testing out automated treatment strategies. Let's assume that
-instead of giving doses at fixed time points, we invent a wearable which
-monitors the patient and administers a dose whenever the internal drug
-concentration falls below 25. To model this effect, we will need to use
-`ContinuousCallbacks` to define a callback that triggers when `[Central]` falls
-below the threshold value.
-
-[Use the documentation on the event handling page](https://docs.sciml.ai/dev/features/callback_functions) to define such a callback,
-and plot the solution over time. How many times does the auto-doser administer
-a dose? How much does this change as you change the delay time $\tau$?
-
-## Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods
-
-To understand how the parameters affect the solution in a global sense, one
-wants to use Global Sensitivity Analysis. Use the
-[GSA documentation page](https://docs.sciml.ai/dev/analysis/global_sensitivity)
-to perform global sensitivity analysis and quantify the effect of the various
-parameters on the solution.
-
-# Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B)
-
-Differential-Algebraic Equation (DAE) systems are like ODEs but allow for adding
-constraints into the models. This problem will look at solving the double
-pendulum problem with enforcement of the rigid body constraints, requiring that
-the total distance `L` is constant throughout the simulation. While these
-equations can be rewritten in an ODE form, in many cases it can be simpler
-to solve the equation directly with the constraints. This tutorial will
-cover both the idea of index, how to manually perform index reduction,
-and how to make use of mass matrix and implicit ODE solvers to handle these
-problems.
-
-## Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations
-
-A mass-matrix ordinary differential equation (ODE) is an ODE where the
-left-hand side, the derivative side, is multiplied by a matrix known as the
-mass matrix. This is described as:
-
-$$Mu' = f(u,p,t)$$
-
-where $M$ is the mass matrix. When $M$ is invertible, there is an ODE which is
-equivalent to this formulation. When $M$ is not invertible, this can have a
-distinctly different behavior and is known as a Differential-Algebraic Equation (DAE).
-
-Solve the Robertson DAE:
-
-$$\begin{align}
-\frac{dy_1}{dt} &= -0.04y_1 + 10^4 y_2y_3\\
-\frac{dy_2}{dt} &= 0.04y_1 - 10^4 y_2y_3 - 3\times 10^7 y_2^2\\
-1 &= y_1 + y_2 + y_3\end{align}$$
-
-with $y(0) = [1,0,0]$ and $dy(0) = [-0.04,0.04,0.0]$ using the mass-matrix
-formulation and `Rodas5()`. Use the
-[ODEProblem page](https://docs.sciml.ai/dev/types/ode_types)
-to find out how to declare a mass matrix.
-
-(Hint: what if the last row has all zeros?)
-
-## Part 2: Solving the Implicit Robertson Equations with IDA
-
-Use the [DAE Tutorial](https://docs.sciml.ai/dev/tutorials/dae_example)
-to define a DAE in its implicit form and solve the Robertson equation with IDA.
-Why is `differential_vars = [true,true,false]`?
-
-## Part 3: Manual Index Reduction of the Single Pendulum
-
-The index of a DAE is a notion used to measure distance from
-its related ODE. There are many different definitions of index,
-but we're going to stick to the idea of differential index:
-the number of differentiations required to convert a system
-of DAEs into explicit ODE form. DAEs of high index are
-usually transformed via a procedure called index reduction.
-The following example will demonstrate this.
-
-Consider the index 3 DAE system of the Cartesian pendulum.
-
-After writing down the force equations in both directions,
-we arrive at the following DAE:
-
-$$
-\begin{align}
-m\ddot{x} &= \frac{x}{L}T \\
-m\ddot{y} &= \frac{y}{L}T - mg \\
-x^2 + y^2 &= L
-\end{align}
-$$
-
-Notice that we don't have an equation describing the
-behaviour of `T`. Let us now perform index reduction to
-extract an equation for `T`.
-
-Differentiate this third equation twice with respect to time
-to reduce it from index 3 to index 1.
-
-## Part 4: Single Pendulum Solution with IDA
-Write these equations in implicit form and solve the system using
-IDA.
-
-## Part 5: Solving the Double Pendulum DAE System
-
-The following equations describe a double
-pendulum system:
-$$
-\begin{align}
-m_2\ddot{x_2} &= \frac{x_2}{L_2}T_2 \\
-m_2\ddot{y_2} &= \frac{y_2}{L_2}T_2 - m_2g \\
-{x_2}^2 + {y_2}^2 &= L_2 \\
-m_1\ddot{x_1} &= \frac{x_1}{L_1}T_1 - \frac{x_2}{L_2}T_2 \\
-m_1\ddot{y_1} &= \frac{y_1}{L_1}T_1 - m_1g - \frac{y_2}{L_2}T_2 \\
-{x_1}^2 + {y_1}^2 &= L_1 \\
-\end{align}
-$$
-
-Perform index reduction and solve it like in the previous example.
-
-# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I)
-
-This problem will focus on implementing and optimizing the solution of the
-2-dimensional Brusselator equations. The BRUSS equations are a well-known
-highly stiff oscillatory system of partial differential equations which are
-used in stiff ODE solver benchmarks. In this tutorial we will walk first
-through a simple implementation, then do allocation-free implementations and
-look deep into solver options and benchmarking.
- -## Part 1: Implementing the BRUSS PDE System as ODEs - -The Brusselator PDE is defined as follows: - -$$\begin{align} -\frac{\partial u}{\partial t} &= 1 + u^2v - 4.4u + \alpha(\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2}) + f(x, y, t)\\ -\frac{\partial v}{\partial t} &= 3.4u - u^2v + \alpha(\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2})\end{align}$$ - -where - -$$f(x, y, t) = \begin{cases} -5 & \quad \text{if } (x-0.3)^2+(y-0.6)^2 ≤ 0.1^2 \text{ and } t ≥ 1.1 \\ -0 & \quad \text{else}\end{cases}$$ - -and the initial conditions are - -$$\begin{align} -u(x, y, 0) &= 22\cdot y(1-y)^{3/2} \\ -v(x, y, 0) &= 27\cdot x(1-x)^{3/2}\end{align}$$ - -with the periodic boundary condition - -$$\begin{align} -u(x+1,y,t) &= u(x,y,t) \\ -u(x,y+1,t) &= u(x,y,t)\end{align}$$ - -on a timespan of $t \in [0,22]$. - -To solve this PDE, we will discretize it into a system of ODEs with the finite -difference method. We discretize `u` and `v` into arrays of the values at each -time point: `u[i,j] = u(i*dx,j*dy)` for some choice of `dx`/`dy`, and same for -`v`. Then our ODE is defined with `U[i,j,k] = [u v]`. The second derivative -operator, the Laplacian, discretizes to become a tridiagonal matrix with -`[1 -2 1]` and a `1` in the top right and bottom left corners. The nonlinear functions -are then applied at each point in space (they are broadcast). Use `dx=dy=1/32`. - -You will know when you have the correct solution when you plot the solution -at `x=y=0` and see a periodic orbit, e.g., `ts=0:0.05:22; plot(ts, sol1.(ts, -idxs=1))`. - -If you are not familiar with this process, see -[the Gierer-Meinhardt example from the SciMLTutorials.](http://tutorials.sciml.ai/html/introduction/03-optimizing_diffeq_code.html) - -Note: Start by doing the simplest implementation! - -## Part 2: Optimizing the BRUSS Code - -PDEs are expensive to solve, and so we will go nowhere without some code -optimizing! 
Follow the steps described in the
-[the Gierer-Meinhardt example from the SciMLTutorials](http://tutorials.sciml.ai/html/introduction/03-optimizing_diffeq_code.html)
-to optimize your Brusselator code. Try other formulations and see what ends
-up the fastest! Find a trade-off between performance and simplicity that suits
-your needs.
-
-## Part 3: Exploiting Jacobian Sparsity with Color Differentiation
-
-Use the `sparsity!` function from [SparseDiffTools](https://github.com/JuliaDiffEq/SparseDiffTools.jl)
-to generate the sparsity pattern for the Jacobian of this problem. Follow
-the documentation [on the DiffEqFunction page](https://docs.sciml.ai/dev/features/performance_overloads)
-to specify the sparsity pattern of the Jacobian. Generate and add the color
-vector to speed up the computation of the Jacobian.
-
-## (Optional) Part 4: Structured Jacobians
-
-Specify the sparsity pattern using a BlockBandedMatrix from
-[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)
-to accelerate the previous sparsity handling tricks.
-
-## (Optional) Part 5: Automatic Symbolicification and Analytical Jacobian
-
-Use the `modelingtoolkitize` function from ModelingToolkit.jl to convert your
-numerical ODE function into a symbolic ODE function and use that to compute and
-solve with an analytical sparse Jacobian.
-
-## Part 6: Utilizing Preconditioned-GMRES Linear Solvers
-
-Use the [linear solver specification page](https://docs.sciml.ai/dev/features/linear_nonlinear)
-to solve the equation with `TRBDF2` with GMRES. Use the Sundials documentation
-to solve the equation with `CVODE_BDF` with Sundials' special internal GMRES.
-For both of these, use the [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl)
-to add a preconditioner to the GMRES solver.
- -## Part 7: Exploring IMEX and Exponential Integrator Techniques (E) - -Instead of using the standard `ODEProblem`, define a [`SplitODEProblem`](https://docs.sciml.ai/dev/types/split_ode_types) -to move some of the equation to the "non-stiff part". Try different splits -and solve with `KenCarp4` to see if the solution can be accelerated. - -Next, use `MatrixFreeOperator` and `DiffEqArrayOperator` to define part of the equation as linear, and -use the `ETDRK4` exponential integrator to solve the equation. Note that this -technique is not appropriate for this equation since it relies on the -nonlinear term being non-stiff for best results. - -## Part 8: Work-Precision Diagrams for Benchmarking Solver Choices - -Use the `WorkPrecisionSet` method from -[DiffEqDevTools.jl](https://github.com/JuliaDiffEq/DiffEqDevTools.jl) to -benchmark multiple different solver methods and find out what combination is -most efficient. -[Take a look at DiffEqBenchmarks.jl](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) -for usage examples. - -## Part 9: GPU-Parallelism for PDEs (E) - -Fully vectorize your implementation of the ODE and use a `CuArray` from -[CuArrays.jl](https://github.com/JuliaGPU/CuArrays.jl) as the initial condition -to cause the whole solution to be GPU accelerated. - -## Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs - -In order to optimize the parameters of a PDE, you need to be able to compute -the gradient of the solution with respect to the parameters. This is done -through sensitivity analysis. For PDEs, generally the system is at a scale -where forward sensitivity analysis (forward-mode automatic differentiation) -is no longer suitable, and for these cases one uses adjoint sensitivity analysis. 
- -Rewrite the PDE so the constant terms are parameters, and use the -[adjoint sensitivity analysis](https://docs.sciml.ai/latest/analysis/sensitivity/#Adjoint-Sensitivity-Analysis-1) -documentation to solve for the solution gradient with a cost function being the -L2 distance of the solution from the value 1. Solve with interpolated and -checkpointed adjoints. Play with using reverse-mode automatic differentiation -vs direct computation of vector-Jacobian products using the `autojacvec` option -of the `SensitivityAlg`. Find the set of options most suitable for this PDE. - -If you have compute time, use this adjoint to optimize the parameters of the -PDE with respect to this cost function. - -# Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B) - -In this example we will investigate how the parameters "generally" effect the -solution in the chaotic Henon-Heiles system. By "generally" we will use global -sensitivity analysis methods to get an average global characterization of the -parameters on the solution. In addition to a global sensitivity approach, we -will generate large ensembles of solutions with different parameters using -a GPU-based parallelism approach. - -## Part 1: Implementing the Henon-Heiles System (B) - -The Henon-Heiles Hamiltonian system is described by the ODEs: - -$$\begin{align} -\frac{dp_1}{dt} &= -q_1 (1 + 2q_2)\\ -\frac{dp_2}{dt} &= -q_2 - (q_1^2 - q_2^2)\\ -\frac{dq_1}{dt} &= p_1\\ -\frac{dq_2}{dt} &= p_2\end{align}$$ - -with initial conditions $u_0 = [0.1,0.0,0.0,0.5]$. -Solve this system over the timespan $t\in[0,1000]$ - -## (Optional) Part 2: Alternative Dynamical Implmentations of Henon-Heiles (B) - -The Henon-Heiles defines a Hamiltonian system with certain structures which -can be utilized for a more efficient solution. 
Use [the Dynamical problems page](https://docs.sciml.ai/dev/types/dynamical_types)
-to define a `SecondOrderODEProblem` corresponding to the acceleration terms:
-
-$$\begin{align}
-\frac{d^2q_1}{dt^2} &= -q_1 (1 + 2q_2)\\
-\frac{d^2q_2}{dt^2} &= -q_2 - (q_1^2 - q_2^2)\end{align}$$
-
-Solve this with a method that is specific to dynamical problems, like `DPRKN6`.
-
-The Hamiltonian can also be directly described:
-
-$$H(p,q) = \frac{1}{2}(p_1^2 + p_2^2) + \frac{1}{2}(q_1^2+q_2^2+2q_1^2 q_2 - \frac{2}{3}q_2^3)$$
-
-Solve this problem using the `HamiltonianProblem` constructor from DiffEqPhysics.jl.
-
-## Part 3: Parallelized Ensemble Solving
-
-To understand the orbits of the Henon-Heiles system, it can be useful to solve
-the system with many different initial conditions. Use the
-[ensemble interface](https://docs.sciml.ai/dev/features/ensemble)
-to solve with randomized initial conditions in parallel using threads with
-`EnsembleThreads()`. Then, use `addprocs()` to add more cores and solve using
-`EnsembleDistributed()`. The former will solve using all of the cores on a
-single computer, while the latter will use all of the cores on which there
-are processors, which can include thousands across a supercomputer! See
-[Julia's parallel computing setup page](https://docs.julialang.org/en/v1/manual/parallel-computing/index.html)
-for more details on the setup.
-
-## Part 4: Parallelized GPU Ensemble Solving
-
-Set up the CUDAnative.jl library and use the `EnsembleGPUArray()` method to
-parallelize the solution across the thousands of cores of a GPU. Note that
-this will efficiently solve for hundreds of thousands of trajectories.
-
-# Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I)
-
-In the previous models we had to define a model. Now let's shift the burden of
-model-proofing onto data by utilizing neural differential equations. A neural
-differential equation is a differential equation where the model equations
-are replaced, either in full or in part, by a neural network. For example, a
-neural ordinary differential equation is an equation $u^\prime = f(u,p,t)$
-where $f$ is a neural network. We can learn this neural network from data using
-various methods, the easiest of which is known as the single shooting method,
-where one chooses neural network parameters, solves the equation, and checks
-the ODE's solution against data as a loss.
-
-In this example we will define and train various forms of neural differential
-equations. Note that all of the differential equation types are compatible with
-neural differential equations, so this is only going to scratch the surface of
-the possibilities!
-
-## Part 1: Constructing and Training a Basic Neural ODE
-
-Use the [DiffEqFlux.jl README](https://github.com/JuliaDiffEq/DiffEqFlux.jl) to
-construct a neural ODE to train against the training data:
-
-```{julia;eval=false}
-u0 = Float32[2.; 0.]
-datasize = 30
-tspan = (0.0f0,1.5f0)
-
-function trueODEfunc(du,u,p,t)
-    true_A = [-0.1 2.0; -2.0 -0.1]
-    du .= ((u.^3)'true_A)'
-end
-t = range(tspan[1],tspan[2],length=datasize)
-prob = ODEProblem(trueODEfunc,u0,tspan)
-ode_data = Array(solve(prob,Tsit5(),saveat=t))
-```
-
-## Part 2: GPU-accelerating the Neural ODE Process
-
-Use the `gpu` function from Flux.jl to transform all of the calculations onto
-the GPU and train the neural ODE using GPU-accelerated `Tsit5` with adjoints.
- -## Part 3: Defining and Training a Mixed Neural ODE - -Gather data from the Lotka-Volterra equation: - -```{julia;eval=false} -function lotka_volterra(du,u,p,t) - x, y = u - α, β, δ, γ = p - du[1] = dx = α*x - β*x*y - du[2] = dy = -δ*y + γ*x*y -end -u0 = [1.0,1.0] -tspan = (0.0,10.0) -p = [1.5,1.0,3.0,1.0] -prob = ODEProblem(lotka_volterra,u0,tspan,p) -sol = Array(solve(prob,Tsit5())(0.0:1.0:10.0)) -``` - -Now use the -[mixed neural section of the documentation](https://github.com/JuliaDiffEq/DiffEqFlux.jl#mixed-neural-des) -to define the mixed neural ODE where the functional form of $\frac{dx}{dt}$ is -known, and try to derive a neural formulation for $\frac{dy}{dt}$ directly from -the data. - -## Part 4: Constructing a Basic Neural SDE - -Generate data from the Lotka-Volterra equation with multiplicative noise - -```{julia;eval=false} -function lotka_volterra(du,u,p,t) - x, y = u - α, β, δ, γ = p - du[1] = dx = α*x - β*x*y - du[2] = dy = -δ*y + γ*x*y -end -function lv_noise(du,u,p,t) - du[1] = p[5]*u[1] - du[2] = p[6]*u[2] -end -u0 = [1.0,1.0] -tspan = (0.0,10.0) -p = [1.5,1.0,3.0,1.0,0.1,0.1] -prob = SDEProblem(lotka_volterra,lv_noise,u0,tspan,p) -sol = [Array(solve(prob,SOSRI())(0.0:1.0:10.0)) for i in 1:20] # 20 solution samples -``` - -Train a neural stochastic differential equation $dX = f(X)dt + g(X)dW_t$ where -both the drift ($f$) and the diffusion ($g$) functions are neural networks. -See if constraining $g$ can make the problem easier to fit. - -## Part 5: Optimizing the training behavior with minibatching (E) - -Use minibatching on the data to improve the training procedure. An example -[can be found at this PR](https://github.com/FluxML/model-zoo/pull/88). 
diff --git a/tutorials/exercises/02-workshop_solutions.jmd b/tutorials/exercises/02-workshop_solutions.jmd deleted file mode 100644 index aa42cb24..00000000 --- a/tutorials/exercises/02-workshop_solutions.jmd +++ /dev/null @@ -1,723 +0,0 @@ ---- -title: SciML Workshop Exercise Solutions -author: Chris Rackauckas ---- - -```julia -using DifferentialEquations -using Sundials -using BenchmarkTools -using Plots -``` - -# Problem 1: Investigating Sources of Randomness and Uncertainty in a Biological System - -## Part 1: Simulating the Oregonator ODE model - -```julia -using DifferentialEquations, Plots -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -p = [77.27,8.375e-6,0.161] -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,360.0),p) -sol = solve(prob) -plot(sol) -``` - -```julia -plot(sol,vars=(1,2,3)) -``` - -## Part 2: Investigating Stiffness - -```julia -using BenchmarkTools -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,50.0),p) -@btime sol = solve(prob,Tsit5()) -``` - -```julia -@btime sol = solve(prob,Rodas5()) -``` - -## (Optional) Part 3: Specifying Analytical Jacobians (I) - -## (Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations - -## Part 5: Adding stochasticity with stochastic differential equations - -```julia -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -function g(du,u,p,t) - du[1] = 0.1u[1] - du[2] = 0.1u[2] - du[3] = 0.1u[3] -end -p = [77.27,8.375e-6,0.161] -prob = SDEProblem(orego,g,[1.0,2.0,3.0],(0.0,30.0),p) -sol = solve(prob,SOSRI()) -plot(sol) -``` - -```julia -sol = solve(prob,ImplicitRKMil()); plot(sol) -``` - -```julia -sol = solve(prob,ImplicitRKMil()); plot(sol) -``` - -## Part 6: Gillespie jump models of discrete stochasticity - -## Part 7: Probabilistic Programming / Bayesian Parameter Estimation with DiffEqBayes.jl + Turing.jl (I) - -The 
data was generated with: - -```julia -function orego(du,u,p,t) - s,q,w = p - y1,y2,y3 = u - du[1] = s*(y2+y1*(1-q*y1-y2)) - du[2] = (y3-(1+y1)*y2)/s - du[3] = w*(y1-y3) -end -p = [60.0,1e-5,0.2] -prob = ODEProblem(orego,[1.0,2.0,3.0],(0.0,30.0),p) -sol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14) -``` - -## (Optional) Part 8: Using DiffEqBiological's Reaction Network DSL - -# Problem 2: Fitting Hybrid Delay Pharmacokinetic Models with Automated Responses (B) - -## Part 1: Defining an ODE with Predetermined Doses - -```julia -function onecompartment(du,u,p,t) - Ka,Ke = p - du[1] = -Ka*u[1] - du[2] = Ka*u[1] - Ke*u[2] -end -p = (Ka=2.268,Ke=0.07398) -prob = ODEProblem(onecompartment,[100.0,0.0],(0.0,90.0),p) - -tstops = [24,48,72] -condition(u,t,integrator) = t ∈ tstops -affect!(integrator) = (integrator.u[1] += 100) -cb = DiscreteCallback(condition,affect!) -sol = solve(prob,Tsit5(),callback=cb,tstops=tstops) -plot(sol) -``` - -## Part 2: Adding Delays - -```julia -function onecompartment_delay(du,u,h,p,t) - Ka,Ke,τ = p - delayed_depot = h(p,t-τ)[1] - du[1] = -Ka*u[1] - du[2] = Ka*delayed_depot - Ke*u[2] -end -p = (Ka=2.268,Ke=0.07398,τ=6.0) -h(p,t) = [0.0,0.0] -prob = DDEProblem(onecompartment_delay,[100.0,0.0],h,(0.0,90.0),p) - -tstops = [24,48,72] -condition(u,t,integrator) = t ∈ tstops -affect!(integrator) = (integrator.u[1] += 100) -cb = DiscreteCallback(condition,affect!) 
-sol = solve(prob,MethodOfSteps(Rosenbrock23()),callback=cb,tstops=tstops) -plot(sol) -``` - -## Part 3: Automatic Differentiation (AD) for Optimization (I) - -## Part 4: Fitting Known Quantities with DiffEqParamEstim.jl + Optim.jl - -The data was generated with - -```julia -p = (Ka = 0.5, Ke = 0.1, τ = 4.0) -``` - -## Part 5: Implementing Control-Based Logic with ContinuousCallbacks (I) - -## Part 6: Global Sensitivity Analysis with the Morris and Sobol Methods - -# Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B) - -## Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations -```julia -function f(du, u, p, t) - du[1] = -p[1]*u[1] + p[2]*u[2]*u[3] - du[2] = p[1]*u[1] - p[2]*u[2]*u[3] - p[3]*u[2]*u[2] - du[3] = u[1] + u[2] + u[3] - 1. -end -M = [1 0 0; 0 1 0; 0 0 0.] -p = [0.04, 10^4, 3e7] -u0 = [1.,0.,0.] -tspan = (0., 1e6) -prob = ODEProblem(ODEFunction(f, mass_matrix = M), u0, tspan, p) -sol = solve(prob, Rodas5()) -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -## Part 2: Solving the Implicit Robertson Equations with IDA -```julia -# Robertson Equation DAE Implicit form -function h(out, du, u, p, t) - out[1] = -p[1]*u[1] + p[2]*u[2]*u[3] - du[1] - out[2] = p[1]*u[1] - p[2]*u[2]*u[3] - p[3]*u[2]*u[2] - du[2] - out[3] = u[1] + u[2] + u[3] - 1. -end -p = [0.04, 10^4, 3e7] -du0 = [-0.04, 0.04, 0.0] -u0 = [1.,0.,0.] 
-tspan = (0., 1e6) -differential_vars = [true, true, false] -prob = DAEProblem(h, du0, u0, tspan, p, differential_vars = differential_vars) -sol = solve(prob, IDA()) -plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) -``` - -## Part 3: Manual Index Reduction of the Single Pendulum -Consider the equation: -$$ -x^2 + y^2 = L -$$ -Differentiating once with respect to time: -$$ -2x\dot{x} + 2y\dot{y} = 0 -$$ -A second time: -$$ -\begin{align} -{\dot{x}}^2 + x\ddot{x} + {\dot{y}}^2 + y\ddot{y} &= 0 \\ -u^2 + v^2 + x(\frac{x}{mL}T) + y(\frac{y}{mL}T - g) &= 0 \\ -u^2 + v^2 + \frac{x^2 + y^2}{mL}T - yg &= 0 \\ -u^2 + v^2 + \frac{T}{m} - yg &= 0 -\end{align} -$$ - -Our final set of equations is hence -$$ -\begin{align} - \ddot{x} &= \frac{x}{mL}T \\ - \ddot{y} &= \frac{y}{mL}T - g \\ - \dot{x} &= u \\ - \dot{y} &= v \\ - u^2 + v^2 -yg + \frac{T}{m} &= 0 -\end{align} -$$ - -We finally obtain $T$ into the third equation. -This required two differentiations with respect -to time, and so our system of equations went from -index 3 to index 1. Now our solver can handle the -index 1 system. - -## Part 4: Single Pendulum Solution with IDA -```julia -function f(out, da, a, p, t) - (L, m, g) = p - u, v, x, y, T = a - du, dv, dx, dy, dT = da - out[1] = x*T/(m*L) - du - out[2] = y*T/(m*L) - g - dv - out[3] = u - dx - out[4] = v - dy - out[5] = u^2 + v^2 - y*g + T/m - nothing -end - -# Release pendulum from top right -u0 = zeros(5) -u0[3] = 1.0 -du0 = zeros(5) -du0[2] = 9.81 - -p = [1,1,9.8] -tspan = (0.,100.) - -differential_vars = [true, true, true, true, false] -prob = DAEProblem(f, du0, u0, tspan, p, differential_vars = differential_vars) -sol = solve(prob, IDA()) -plot(sol, vars=(3,4)) -``` - -## Part 5: Solving the Double Penulum DAE System -For the double pendulum: -The equations for the second ball are the same -as the single pendulum case. 
That is, the equations -for the second ball are: -$$ -\begin{align} - \ddot{x_2} &= \frac{x_2}{m_2L_2}T_2 \\ - \ddot{y_2} &= \frac{y_2}{m_2L_2}T_2 - g \\ - \dot{x_2} &= u \\ - \dot{y_2} &= v \\ - u_2^2 + v_2^2 -y_2g + \frac{T_2}{m_2} &= 0 -\end{align} -$$ -For the first ball, consider $x_1^2 + y_1^2 = L $ -$$ -\begin{align} -x_1^2 + x_2^2 &= L \\ -2x_1\dot{x_1} + 2y_1\dot{y_1} &= 0 \\ -\dot{x_1}^2 + \dot{y_1}^2 + x_1(\frac{x_1}{m_1L_1}T_1 - \frac{x_2}{m_1L_2}T_2) + y_1(\frac{y_1}{m_1L_1}T_1 - g - \frac{y_2}{m_1L_2}T_2) &= 0 \\ -u_1^2 + v_1^2 + \frac{T_1}{m_1} - \frac{x_1x_2 + y_1y_2}{m_1L_2}T_2 &= 0 -\end{align} -$$ - -So the final equations are: -$$ -\begin{align} - \dot{u_2} &= x_2*T_2/(m_2*L_2) - \dot{v_2} &= y_2*T_2/(m_2*L_2) - g - \dot{x_2} &= u_2 - \dot{y_2} &= v_2 - u_2^2 + v_2^2 -y_2*g + \frac{T_2}{m_2} &= 0 - - \dot{u_1} &= x_1*T_1/(m_1*L_1) - x_2*T_2/(m_2*L_2) - \dot{v_1} &= y_1*T_1/(m_1*L_1) - g - y_2*T_2/(m_2*L_2) - \dot{x_1} &= u_1 - \dot{y_1} &= v_1 - u_1^2 + v_1^2 + \frac{T_1}{m_1} + - \frac{-x_1*x_2 - y_1*y_2}{m_1L_2}T_2 - y_1g &= 0 -\end{align} -$$ -```julia -function f(out, da, a, p, t) - L1, m1, L2, m2, g = p - - u1, v1, x1, y1, T1, - u2, v2, x2, y2, T2 = a - - du1, dv1, dx1, dy1, dT1, - du2, dv2, dx2, dy2, dT2 = da - - out[1] = x2*T2/(m2*L2) - du2 - out[2] = y2*T2/(m2*L2) - g - dv2 - out[3] = u2 - dx2 - out[4] = v2 - dy2 - out[5] = u2^2 + v2^2 -y2*g + T2/m2 - - out[6] = x1*T1/(m1*L1) - x2*T2/(m2*L2) - du1 - out[7] = y1*T1/(m1*L1) - g - y2*T2/(m2*L2) - dv1 - out[8] = u1 - dx1 - out[9] = v1 - dy1 - out[10] = u1^2 + v1^2 + T1/m1 + - (-x1*x2 - y1*y2)/(m1*L2)*T2 - y1*g - nothing -end - -# Release pendulum from top right -u0 = zeros(10) -u0[3] = 1.0 -u0[8] = 1.0 -du0 = zeros(10) -du0[2] = 9.8 -du0[7] = 9.8 - -p = [1,1,1,1,9.8] -tspan = (0.,100.) 
- -differential_vars = [true, true, true, true, false, - true, true, true, true, false] -prob = DAEProblem(f, du0, u0, tspan, p, differential_vars = differential_vars) -sol = solve(prob, IDA()) - -plot(sol, vars=(3,4)) -plot(sol, vars=(8,9)) -``` - -# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I) -## Part 1: Implementing the BRUSS PDE System as ODEs - -```julia -using DifferentialEquations, Sundials, Plots - -# initial condition -function init_brusselator_2d(xyd) - N = length(xyd) - u = zeros(N, N, 2) - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - u[I,1] = 22*(y*(1-y))^(3/2) - u[I,2] = 27*(x*(1-x))^(3/2) - end - u -end - -N = 32 - -xyd_brusselator = range(0,stop=1,length=N) - -u0 = vec(init_brusselator_2d(xyd_brusselator)) - -tspan = (0, 22.) - -p = (3.4, 1., 10., xyd_brusselator) - -brusselator_f(x, y, t) = ifelse((((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) && - (t >= 1.1), 5., 0.) - - -using LinearAlgebra, SparseArrays -du = ones(N-1) -D2 = spdiagm(-1 => du, 0=>fill(-2.0, N), 1 => du) -D2[1, N] = D2[N, 1] = 1 -D2 = 1/step(xyd_brusselator)^2*D2 -tmp = Matrix{Float64}(undef, N, N) -function brusselator_2d_op(du, u, (D2, tmp, p), t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - α = α/dx^2 - du = reshape(du, N, N, 2) - u = reshape(u, N, N, 2) - @views for i in axes(u, 3) - ui = u[:, :, i] - dui = du[:, :, i] - mul!(tmp, D2, ui) - mul!(dui, ui, D2') - dui .+= tmp - end - - @inbounds begin - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - - du[i,j,1] = α*du[i,j,1] + B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) - du[i,j,2] = α*du[i,j,2] + A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] - end - end - nothing -end - -prob1 = ODEProblem(brusselator_2d_op, u0, tspan, (D2, tmp, p)) - -sol1 = @time solve(prob1, TRBDF2(autodiff=false)); -``` - -Visualizing the solution (works best in a terminal): - -```{julia;eval=false} -@gif for t in sol1.t[1]:0.1:sol1.t[end] - off = 
N^2 - solt = sol1(t) - plt1 = surface(reshape(solt[1:off], N, N), zlims=(0, 5), leg=false) - surface!(plt1, reshape(solt[off+1:end], N, N), zlims=(0, 5), leg=false) - display(plt1) -end -``` - - -## Part 2: Optimizing the BRUSS Code - -```julia -function brusselator_2d_loop(du, u, p, t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - α = α/dx^2 - limit = a -> let N=N - a == N+1 ? 1 : - a == 0 ? N : - a - end - II = LinearIndices((N, N, 2)) - - @inbounds begin - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - ip1 = limit(i+1) - im1 = limit(i-1) - jp1 = limit(j+1) - jm1 = limit(j-1) - - ii1 = II[i,j,1] - ii2 = II[i,j,2] - - du[II[i,j,1]] = α*(u[II[im1,j,1]] + u[II[ip1,j,1]] + u[II[i,jp1,1]] + u[II[i,jm1,1]] - 4u[ii1]) + - B + u[ii1]^2*u[ii2] - (A + 1)*u[ii1] + brusselator_f(x, y, t) - - du[II[i,j,2]] = α*(u[II[im1,j,2]] + u[II[ip1,j,2]] + u[II[i,jp1,2]] + u[II[i,jm1,2]] - 4u[II[i,j,2]]) + - A*u[ii1] - u[ii1]^2*u[ii2] - end - end - nothing -end - -prob2 = ODEProblem(brusselator_2d_loop, u0, tspan, p) - -sol2 = @time solve(prob2, TRBDF2()) -sol2_2 = @time solve(prob2, CVODE_BDF()) -``` - -## Part 3: Exploiting Jacobian Sparsity with Color Differentiation - -```julia -using SparseDiffTools, SparsityDetection - -sparsity_pattern = jacobian_sparsity(brusselator_2d_loop,similar(u0),u0,p,2.0) -jac_sp = sparse(sparsity_pattern) -jac = Float64.(jac_sp) -colors = matrix_colors(jac) -prob3 = ODEProblem(ODEFunction(brusselator_2d_loop, colorvec=colors,jac_prototype=jac_sp), u0, tspan, p) -sol3 = @time solve(prob3, TRBDF2()) -``` - -## (Optional) Part 4: Structured Jacobians - -## (Optional) Part 5: Automatic Symbolicification and Analytical Jacobian - -## Part 6: Utilizing Preconditioned-GMRES Linear Solvers - -```julia -using DiffEqOperators -using Sundials -using AlgebraicMultigrid: ruge_stuben, aspreconditioner, smoothed_aggregation -prob6 = ODEProblem(ODEFunction(brusselator_2d_loop, 
jac_prototype=JacVecOperator{Float64}(brusselator_2d_loop, u0)), u0, tspan, p) -II = Matrix{Float64}(I, N, N) -Op = kron(Matrix{Float64}(I, 2, 2), kron(D2, II) + kron(II, D2)) -Wapprox = -I+Op -#ml = ruge_stuben(Wapprox) -ml = smoothed_aggregation(Wapprox) -precond = aspreconditioner(ml) -sol_trbdf2 = @time solve(prob6, TRBDF2(linsolve=LinSolveGMRES())); # no preconditioner -sol_trbdf2 = @time solve(prob6, TRBDF2(linsolve=LinSolveGMRES(Pl=lu(Wapprox)))); # sparse LU -sol_trbdf2 = @time solve(prob6, TRBDF2(linsolve=LinSolveGMRES(Pl=precond))); # AMG -sol_cvodebdf = @time solve(prob2, CVODE_BDF(linear_solver=:GMRES)); -``` - -## Part 7: Exploring IMEX and Exponential Integrator Techniques (E) - -```julia -function laplacian2d(du, u, p, t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - du = reshape(du, N, N, 2) - u = reshape(u, N, N, 2) - @inbounds begin - α = α/dx^2 - limit = a -> let N=N - a == N+1 ? 1 : - a == 0 ? N : - a - end - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - ip1 = limit(i+1) - im1 = limit(i-1) - jp1 = limit(j+1) - jm1 = limit(j-1) - du[i,j,1] = α*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) - du[i,j,2] = α*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) - end - end - nothing -end -function brusselator_reaction(du, u, p, t) - A, B, α, xyd = p - dx = step(xyd) - N = length(xyd) - du = reshape(du, N, N, 2) - u = reshape(u, N, N, 2) - @inbounds begin - for I in CartesianIndices((N, N)) - x = xyd[I[1]] - y = xyd[I[2]] - i = I[1] - j = I[2] - du[i,j,1] = B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) - du[i,j,2] = A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] - end - end - nothing -end -prob7 = SplitODEProblem(laplacian2d, brusselator_reaction, u0, tspan, p) -sol7 = @time solve(prob7, KenCarp4()) -M = MatrixFreeOperator((du,u,p)->laplacian2d(du, u, p, 0), (p,), size=(2*N^2, 2*N^2), opnorm=1000) -prob7_2 = SplitODEProblem(M, brusselator_reaction, u0, tspan, p) 
-sol7_2 = @time solve(prob7_2, ETDRK4(krylov=true), dt=1) -prob7_3 = SplitODEProblem(DiffEqArrayOperator(Op), brusselator_reaction, u0, tspan, p) -sol7_3 = solve(prob7_3, KenCarp4()); -``` - -## Part 8: Work-Precision Diagrams for Benchmarking Solver Choices - -```julia -using DiffEqDevTools -abstols = 0.1 .^ (5:8) -reltols = 0.1 .^ (1:4) -sol = solve(prob3,CVODE_BDF(linear_solver=:GMRES),abstol=1/10^7,reltol=1/10^10) -test_sol = TestSolution(sol) -probs = [prob2, prob3, prob6] -setups = [Dict(:alg=>CVODE_BDF(),:prob_choice => 1), - Dict(:alg=>CVODE_BDF(linear_solver=:GMRES), :prob_choice => 1), - Dict(:alg=>TRBDF2(), :prob_choice => 1), - Dict(:alg=>TRBDF2(linsolve=LinSolveGMRES(Pl=precond)), :prob_choice => 3), - Dict(:alg=>TRBDF2(), :prob_choice => 2) - ] -labels = ["CVODE_BDF (dense)" "CVODE_BDF (GMRES)" "TRBDF2 (dense)" "TRBDF2 (sparse)" "TRBDF2 (GMRES)"] -wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=[test_sol,test_sol,test_sol],save_everystep=false,numruns=3, - names=labels, print_names=true, seconds=0.5) -plot(wp) -``` - -## Part 9: GPU-Parallelism for PDEs (E) - -## Part 10: Adjoint Sensitivity Analysis for Gradients of PDEs - -# Problem 5: Global Parameter Sensitivity and Optimality with GPU and Distributed Ensembles (B) - -## Part 1: Implementing the Henon-Heiles System (B) - -```julia -function henon(dz,z,p,t) - p₁, p₂, q₁, q₂ = z[1], z[2], z[3], z[4] - dp₁ = -q₁*(1 + 2q₂) - dp₂ = -q₂-(q₁^2 - q₂^2) - dq₁ = p₁ - dq₂ = p₂ - - dz .= [dp₁, dp₂, dq₁, dq₂] - return nothing -end - -u₀ = [0.1, 0.0, 0.0, 0.5] -prob = ODEProblem(henon, u₀, (0., 1000.)) -sol = solve(prob, Vern9(), abstol=1e-14, reltol=1e-14) - -plot(sol, vars=[(3,4,1)], tspan=(0,100)) -``` - -## (Optional) Part 2: Alternative Dynamical Implmentations of Henon-Heiles (B) - -```julia -function henon(ddz,dz,z,p,t) - p₁, p₂ = dz[1], dz[2] - q₁, q₂ = z[1], z[2] - ddq₁ = -q₁*(1 + 2q₂) - ddq₂ = -q₂-(q₁^2 - q₂^2) - - ddz .= [ddq₁, ddq₂] -end - -p₀ = u₀[1:2] -q₀ = u₀[3:4] -prob2 = 
SecondOrderODEProblem(henon, p₀, q₀, (0., 1000.)) -sol = solve(prob2, DPRKN6(), abstol=1e-10, reltol=1e-10) - -plot(sol, vars=[(3,4)], tspan=(0,100)) - -H(p, q, params) = 1/2 * (p[1]^2 + p[2]^2) + 1/2 * (q[1]^2 + q[2]^2 + 2q[1]^2 * q[2] - 2/3*q[2]^3) - -prob3 = HamiltonianProblem(H, p₀, q₀, (0., 1000.)) -sol = solve(prob3, DPRKN6(), abstol=1e-10, reltol=1e-10) - -plot(sol, vars=[(3,4)], tspan=(0,100)) -``` - -## Part 3: Parallelized Ensemble Solving - -In order to solve with an ensamble we need some initial conditions. -```julia -function generate_ics(E,n) - # The hardcoded values bellow can be estimated by looking at the - # figures in the Henon-Heiles 1964 article - qrange = range(-0.4, stop = 1.0, length = n) - prange = range(-0.5, stop = 0.5, length = n) - z0 = Vector{Vector{typeof(E)}}() - for q in qrange - V = H([0,0],[0,q],nothing) - V ≥ E && continue - for p in prange - T = 1/2*p^2 - T + V ≥ E && continue - z = [√(2(E-V-T)), p, 0, q] - push!(z0, z) - end - end - return z0 -end - -z0 = generate_ics(0.125, 10) - -function prob_func(prob,i,repeat) - @. prob.u0 = z0[i] - prob -end - -ensprob = EnsembleProblem(prob, prob_func=prob_func) -sim = solve(ensprob, Vern9(), EnsembleThreads(), trajectories=length(z0)) - -plot(sim, vars=(3,4), tspan=(0,10)) -``` - -## Part 4: Parallelized GPU Ensemble Solving - -In order to use GPU parallelization we must make all inputs -(initial conditions, tspan, etc.) `Float32` and the function -definition should be in the in-place form, avoid bound checking and -return `nothing`. 
- -```julia -using DiffEqGPU - -function henon_gpu(dz,z,p,t) - @inbounds begin - dz[1] = -z[3]*(1 + 2z[4]) - dz[2] = -z[4]-(z[3]^2 - z[4]^2) - dz[3] = z[1] - dz[4] = z[2] - end - return nothing -end - -z0 = generate_ics(0.125f0, 50) -prob_gpu = ODEProblem(henon_gpu, Float32.(u₀), (0.f0, 1000.f0)) -ensprob = EnsembleProblem(prob_gpu, prob_func=prob_func) -sim = solve(ensprob, Tsit5(), EnsembleGPUArray(), trajectories=length(z0)) -``` -# Problem 6: Training Neural Stochastic Differential Equations with GPU acceleration (I) - -## Part 1: Constructing and Training a Basic Neural ODE - -## Part 2: GPU-accelerating the Neural ODE Process - -## Part 3: Defining and Training a Mixed Neural ODE - -## Part 4: Constructing a Basic Neural SDE - -## Part 5: Optimizing the training behavior with minibatching (E) - -# Information on the Build - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/exercises/Project.toml b/tutorials/exercises/Project.toml deleted file mode 100644 index 202a026b..00000000 --- a/tutorials/exercises/Project.toml +++ /dev/null @@ -1,30 +0,0 @@ -[deps] -AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -DiffEqDevTools = "f3b72e0c-5b89-59e1-b016-84e28bfd966d" -DiffEqFlux = "aae7a2af-3d4f-5e19-a356-7da93b79d9d0" -DiffEqGPU = "071ae1c0-96b5-11e9-1965-c90190d839ea" -DiffEqOperators = "9fdde737-9c7f-55bf-ade8-46b3f136cc48" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" -Optim = "429524aa-4258-5aef-a3af-852621145aeb" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -SparseDiffTools = "47a9eef4-7e08-11e9-0b38-333d64bd3804" -SparsityDetection = "684fba80-ace3-11e9-3d08-3bc7ed6f96df" -Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -AlgebraicMultigrid = 
"0.3, 0.4" -BenchmarkTools = "0.5, 0.6, 0.7, 1.0" -DiffEqDevTools = "2.22" -DiffEqFlux = "1.16" -DiffEqGPU = "1.3" -DiffEqOperators = "4.10" -DifferentialEquations = "6" -Flux = "0.10, 0.11, 0.12" -Optim = "0.21, 0.22, 1.0" -Plots = "1.5" -SparseDiffTools = "1.9" -SparsityDetection = "0.3" -Sundials = "4.2" diff --git a/tutorials/introduction/01-ode_introduction.jmd b/tutorials/introduction/01-ode_introduction.jmd deleted file mode 100644 index c4ce7994..00000000 --- a/tutorials/introduction/01-ode_introduction.jmd +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: An Intro to DifferentialEquations.jl -author: Chris Rackauckas ---- - -## Basic Introduction Via Ordinary Differential Equations - -This notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the [ODE tutorial](https://docs.sciml.ai/dev/tutorials/ode_example/). While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned. - -### Background - -If you are new to the study of differential equations, it can be helpful to do a quick background read on [the definition of ordinary differential equations](https://en.wikipedia.org/wiki/Ordinary_differential_equation). We define an ordinary differential equation as an equation which describes the way that a variable $u$ changes, that is - -$$u' = f(u,p,t)$$ - -where $p$ are the parameters of the model, $t$ is the time variable, and $f$ is the nonlinear model of how $u$ changes. 
The initial value problem also includes the information about the starting value: - -$$u(t_0) = u_0$$ - -Together, if you know the starting value and you know how the value will change with time, then you know what the value will be at any time point in the future. This is the intuitive definition of a differential equation. - -### First Model: Exponential Growth - -Our first model will be the canonical exponential growth model. This model says that the rate of change is proportional to the current value, and is this: - -$$u' = au$$ - -where we have a starting value $u(0)=u_0$. Let's say we put 1 dollar into Bitcoin which is increasing at a rate of $98\%$ per year. Then calling now $t=0$ and measuring time in years, our model is: - -$$u' = 0.98u$$ - -and $u(0) = 1.0$. We encode this into Julia by noticing that, in this setup, we match the general form when - -```julia -f(u,p,t) = 0.98u -``` - -with $ u_0 = 1.0 $. If we want to solve this model on a time span from `t=0.0` to `t=1.0`, then we define an `ODEProblem` by specifying this function `f`, this initial condition `u0`, and this time span as follows: - -```julia -using DifferentialEquations -f(u,p,t) = 0.98u -u0 = 1.0 -tspan = (0.0,1.0) -prob = ODEProblem(f,u0,tspan) -``` - -To solve our `ODEProblem` we use the command `solve`. - -```julia -sol = solve(prob) -``` - -and that's it: we have succesfully solved our first ODE! - -#### Analyzing the Solution - -Of course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the [Solution Handling](https://docs.sciml.ai/dev/basics/solution/) page. Here we will describe some of the basics. You can plot the solution using the plot recipe provided by [Plots.jl](http://docs.juliaplots.org/dev/): - -```julia -using Plots; gr() -plot(sol) -``` - -From the picture we see that the solution is an exponential curve, which matches our intuition. 
As a plot recipe, we can annotate the result using any of the [Plots.jl attributes](http://docs.juliaplots.org/dev/attributes/). For example: - -```julia -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", - xaxis="Time (t)",yaxis="u(t) (in μm)",label="My Thick Line!") # legend=false -``` - -Using the mutating `plot!` command we can add other pieces to our plot. For this ODE we know that the true solution is $u(t) = u_0 exp(at)$, so let's add some of the true solution to our plot: - -```julia -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") -``` - -In the previous command I demonstrated `sol.t`, which grabs the array of time points that the solution was saved at: - -```julia -sol.t -``` - -We can get the array of solution values using `sol.u`: - -```julia -sol.u -``` - -`sol.u[i]` is the value of the solution at time `sol.t[i]`. We can compute arrays of functions of the solution values using standard comprehensions, like: - -```julia -[t+u for (u,t) in tuples(sol)] -``` - -However, one interesting feature is that, by default, the solution is a continuous function. If we check the print out again: - -```julia -sol -``` - -you see that it says that the solution has a order changing interpolation. The default algorithm automatically switches between methods in order to handle all types of problems. For non-stiff equations (like the one we are solving), it is a continuous function of 4th order accuracy. We can call the solution as a function of time `sol(t)`. For example, to get the value at `t=0.45`, we can use the command: - -```julia -sol(0.45) -``` - -#### Controlling the Solver - -DifferentialEquations.jl has a common set of solver controls among its algorithms which can be found [at the Common Solver Options](https://docs.sciml.ai/dev/basics/common_solver_opts/) page. We will detail some of the most widely used options. - -The most useful options are the tolerances `abstol` and `reltol`. 
These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, `reltol` is the relative accuracy while `abstol` is the accuracy when `u` is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. Thus for the defaults `abstol=1e-6` and `reltol=1e-3`, you can expect a global accuracy of about 1-2 digits. If we want to get around 6 digits of accuracy, we can use the commands: - -```julia -sol = solve(prob,abstol=1e-8,reltol=1e-8) -``` - -Now we can see no visible difference against the true solution: - - -```julia -plot(sol) -plot!(sol.t, t->1.0*exp(0.98t),lw=3,ls=:dash,label="True Solution!") -``` - -Notice that by decreasing the tolerance, the number of steps the solver had to take was `9` instead of the previous `5`. There is a trade off between accuracy and speed, and it is up to you to determine what is the right balance for your problem. - -Another common option is to use `saveat` to make the solver save at specific time points. For example, if we want the solution at an even grid of `t=0.1k` for integers `k`, we would use the command: - -```julia -sol = solve(prob,saveat=0.1) -``` - -Notice that when `saveat` is used the continuous output variables are no longer saved and thus `sol(t)`, the interpolation, is only first order. We can save at an uneven grid of points by passing a collection of values to `saveat`. For example: - -```julia -sol = solve(prob,saveat=[0.2,0.7,0.9]) -``` - -If we need to reduce the amount of saving, we can also turn off the continuous output directly via `dense=false`: - -```julia -sol = solve(prob,dense=false) -``` - -and to turn off all intermediate saving we can use `save_everystep=false`: - -```julia -sol = solve(prob,save_everystep=false) -``` - -If we want to solve and only save the final value, we can even set `save_start=false`. 
- -```julia -sol = solve(prob,save_everystep=false,save_start = false) -``` - -Note that similarly on the other side there is `save_end=false`. - -More advanced saving behaviors, such as saving functionals of the solution, are handled via the `SavingCallback` in the [Callback Library](https://docs.sciml.ai/dev/features/callback_library/#saving_callback-1) which will be addressed later in the tutorial. - -#### Choosing Solver Algorithms - -There is no best algorithm for numerically solving a differential equation. When you call `solve(prob)`, DifferentialEquations.jl makes a guess at a good algorithm for your problem, given the properties that you ask for (the tolerances, the saving information, etc.). However, in many cases you may want more direct control. A later notebook will help introduce the various *algorithms* in DifferentialEquations.jl, but for now let's introduce the *syntax*. - -The most crucial determining factor in choosing a numerical method is the stiffness of the model. Stiffness is roughly characterized by a Jacobian `f` with large eigenvalues. That's quite mathematical, and we can think of it more intuitively: if you have big numbers in `f` (like parameters of order `1e5`), then it's probably stiff. Or, as the creator of the MATLAB ODE Suite, Lawrence Shampine, likes to define it, if the standard algorithms are slow, then it's stiff. We will go into more depth about diagnosing stiffness in a later tutorial, but for now note that if you believe your model may be stiff, you can hint this to the algorithm chooser via `alg_hints = [:stiff]`. - -```julia -sol = solve(prob,alg_hints=[:stiff]) -``` - -Stiff algorithms have to solve implicit equations and linear systems at each step so they should only be used when required. - -If we want to choose an algorithm directly, you can pass the algorithm type after the problem as `solve(prob,alg)`. 
For example, let's solve this problem using the `Tsit5()` algorithm, and just for show let's change the relative tolerance to `1e-6` at the same time: - -```julia -sol = solve(prob,Tsit5(),reltol=1e-6) -``` - -### Systems of ODEs: The Lorenz Equation - -Now let's move to a system of ODEs. The [Lorenz equation](https://en.wikipedia.org/wiki/Lorenz_system) is the famous "butterfly attractor" that spawned chaos theory. It is defined by the system of ODEs: - -$$ -\begin{align} -\frac{dx}{dt} &= \sigma (y - x)\\ -\frac{dy}{dt} &= x (\rho - z) -y\\ -\frac{dz}{dt} &= xy - \beta z -\end{align} -$$ - -To define a system of differential equations in DifferentialEquations.jl, we define our `f` as a vector function with a vector initial condition. Thus, for the vector `u = [x,y,z]'`, we have the derivative function: - -```julia -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -``` - -Notice here we used the in-place format which writes the output to the preallocated vector `du`. For systems of equations the in-place format is faster. We use the initial condition $u_0 = [1.0,0.0,0.0]$ as follows: - -```julia -u0 = [1.0,0.0,0.0] -``` - -Lastly, for this model we made use of the parameters `p`. We need to set this value in the `ODEProblem` as well. For our model we want to solve using the parameters $\sigma = 10$, $\rho = 28$, and $\beta = 8/3$, and thus we build the parameter collection: - -```julia -p = (10,28,8/3) # we could also make this an array, or any other type! -``` - -Now we generate the `ODEProblem` type. In this case, since we have parameters, we add the parameter values to the end of the constructor call. Let's solve this on a time span of `t=0` to `t=100`: - -```julia -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) -``` - -Now, just as before, we solve the problem: - -```julia -sol = solve(prob) -``` - -The same solution handling features apply to this case. 
Thus `sol.t` stores the time points and `sol.u` is an array storing the solution at the corresponding time points. - -However, there are a few extra features which are good to know when dealing with systems of equations. First of all, `sol` also acts like an array. `sol[i]` returns the solution at the `i`th time point. - -```julia -sol.t[10],sol[10] -``` - -Additionally, the solution acts like a matrix where `sol[j,i]` is the value of the `j`th variable at time `i`: - -```julia -sol[2,10] -``` - -We can get a real matrix by performing a conversion: - -```julia -A = Array(sol) -``` - -This is the same as sol, i.e. `sol[i,j] = A[i,j]`, but now it's a true matrix. Plotting will by default show the time series for each variable: - -```julia -plot(sol) -``` - -If we instead want to plot values against each other, we can use the `vars` command. Let's plot variable `1` against variable `2` against variable `3`: - -```julia -plot(sol,vars=(1,2,3)) -``` - -This is the classic Lorenz attractor plot, where the `x` axis is `u[1]`, the `y` axis is `u[2]`, and the `z` axis is `u[3]`. Note that the plot recipe by default uses the interpolation, but we can turn this off: - -```julia -plot(sol,vars=(1,2,3),denseplot=false) -``` - -Yikes! This shows how calculating the continuous solution has saved a lot of computational effort by computing only a sparse solution and filling in the values! Note that in vars, `0=time`, and thus we can plot the time series of a single component like: - -```julia -plot(sol,vars=(0,2)) -``` - -## Internal Types - -The last basic user-interface feature to explore is the choice of types. DifferentialEquations.jl respects your input types to determine the internal types that are used. Thus since in the previous cases, when we used `Float64` values for the initial condition, this meant that the internal values would be solved using `Float64`. 
We made sure that time was specified via `Float64` values, meaning that time steps would utilize 64-bit floats as well. But, by simply changing these types we can change what is used internally. - -As a quick example, let's say we want to solve an ODE defined by a matrix. To do this, we can simply use a matrix as input. - -```julia -A = [1. 0 0 -5 - 4 -2 4 -3 - -4 0 0 1 - 5 -2 2 3] -u0 = rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) -``` - -There is no real difference from what we did before, but now in this case `u0` is a `4x2` matrix. Because of that, the solution at each time point is matrix: - -```julia -sol[3] -``` - -In DifferentialEquations.jl, you can use any type that defines `+`, `-`, `*`, `/`, and has an appropriate `norm`. For example, if we want arbitrary precision floating point numbers, we can change the input to be a matrix of `BigFloat`: - -```julia -big_u0 = big.(u0) -``` - -and we can solve the `ODEProblem` with arbitrary precision numbers by using that initial condition: - -```julia -prob = ODEProblem(f,big_u0,tspan) -sol = solve(prob) -``` - -```julia -sol[1,3] -``` - -To really make use of this, we would want to change `abstol` and `reltol` to be small! Notice that the type for "time" is different than the type for the dependent variables, and this can be used to optimize the algorithm via keeping multiple precisions. We can convert time to be arbitrary precision as well by defining our time span with `BigFloat` variables: - -```julia -prob = ODEProblem(f,big_u0,big.(tspan)) -sol = solve(prob) -``` - -Let's end by showing a more complicated use of types. For small arrays, it's usually faster to do operations on static arrays via the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). The syntax is similar to that of normal arrays, but for these special arrays we utilize the `@SMatrix` macro to indicate we want to create a static array. 
- -```julia -using StaticArrays -A = @SMatrix [ 1.0 0.0 0.0 -5.0 - 4.0 -2.0 4.0 -3.0 - -4.0 0.0 0.0 1.0 - 5.0 -2.0 2.0 3.0] -u0 = @SMatrix rand(4,2) -tspan = (0.0,1.0) -f(u,p,t) = A*u -prob = ODEProblem(f,u0,tspan) -sol = solve(prob) -``` - -```julia -sol[3] -``` - -## Conclusion - -These are the basic controls in DifferentialEquations.jl. All equations are defined via a problem type, and the `solve` command is used with an algorithm choice (or the default) to get a solution. Every solution acts the same, like an array `sol[i]` with `sol.t[i]`, and also like a continuous function `sol(t)` with a nice plot command `plot(sol)`. The Common Solver Options can be used to control the solver for any equation type. Lastly, the types used in the numerical solving are determined by the input types, and this can be used to solve with arbitrary precision and add additional optimizations (this can be used to solve via GPUs for example!). While this was shown on ODEs, these techniques generalize to other types of equations as well. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/02-choosing_algs.jmd b/tutorials/introduction/02-choosing_algs.jmd deleted file mode 100644 index 4e4e81ae..00000000 --- a/tutorials/introduction/02-choosing_algs.jmd +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Choosing an ODE Algorithm -author: Chris Rackauckas ---- - -While the default algorithms, along with `alg_hints = [:stiff]`, will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the [ODE Solvers](https://docs.sciml.ai/dev/solvers/ode_solve/) page which goes into more depth. 
- -## Diagnosing Stiffness - -One of the key things to know for algorithm choices is whether your problem is stiff. Let's take for example the driven Van Der Pol equation: - -```julia -using DifferentialEquations, ParameterizedFunctions -van! = @ode_def VanDerPol begin - dy = μ*((1-x^2)*y - x) - dx = 1*y -end μ - -prob = ODEProblem(van!,[0.0,2.0],(0.0,6.3),1e6) -``` - -One indicating factor that should alert you to the fact that this model may be stiff is the fact that the parameter is `1e6`: large parameters generally mean stiff models. If we try to solve this with the default method: - -```julia -sol = solve(prob,Tsit5()) -``` - -Here it shows that maximum iterations were reached. Another thing that can happen is that the solution can return that the solver was unstable (exploded to infinity) or that `dt` became too small. If these happen, the first thing to do is to check that your model is correct. It could very well be that you made an error that causes the model to be unstable! - -If the model is the problem, then stiffness could be the reason. We can thus hint to the solver to use an appropriate method: - -```julia -sol = solve(prob,alg_hints = [:stiff]) -``` - -Or we can use the default algorithm. By default, DifferentialEquations.jl uses algorithms like `AutoTsit5(Rodas5())` which automatically detect stiffness and switch to an appropriate method once stiffness is known. - -```julia -sol = solve(prob) -``` - -Another way to understand stiffness is to look at the solution. - -```julia -using Plots; gr() -sol = solve(prob,alg_hints = [:stiff],reltol=1e-6) -plot(sol,denseplot=false) -``` - -Let's zoom in on the y-axis to see what's going on: - -```julia -plot(sol,ylims = (-10.0,10.0)) -``` - -Notice how there are some extreme vertical shifts that occur. These vertical shifts are places where the derivative term is very large, and this is indicative of stiffness. 
This is an extreme example to highlight the behavior, but this general idea can be carried over to your problem. When in doubt, simply try timing using both a stiff solver and a non-stiff solver and see which is more efficient. - -To try this out, let's use BenchmarkTools, a package that let's us relatively reliably time code blocks. - -```julia -function lorenz!(du,u,p,t) - σ,ρ,β = p - du[1] = σ*(u[2]-u[1]) - du[2] = u[1]*(ρ-u[3]) - u[2] - du[3] = u[1]*u[2] - β*u[3] -end -u0 = [1.0,0.0,0.0] -p = (10,28,8/3) -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan,p) -``` - -And now, let's use the `@btime` macro from benchmark tools to compare the use of non-stiff and stiff solvers on this problem. - -```julia -using BenchmarkTools -@btime solve(prob); -``` - -```julia -@btime solve(prob,alg_hints = [:stiff]); -``` - -In this particular case, we can see that non-stiff solvers get us to the solution much more quickly. - -## The Recommended Methods - -When picking a method, the general rules are as follows: - -- Higher order is more efficient at lower tolerances, lower order is more efficient at higher tolerances -- Adaptivity is essential in most real-world scenarios -- Runge-Kutta methods do well with non-stiff equations, Rosenbrock methods do well with small stiff equations, BDF methods do well with large stiff equations - -While there are always exceptions to the rule, those are good guiding principles. Based on those, a simple way to choose methods is: - -- The default is `Tsit5()`, a non-stiff Runge-Kutta method of Order 5 -- If you use low tolerances (`1e-8`), try `Vern7()` or `Vern9()` -- If you use high tolerances, try `BS3()` -- If the problem is stiff, try `Rosenbrock23()`, `Rodas5()`, or `CVODE_BDF()` -- If you don't know, use `AutoTsit5(Rosenbrock23())` or `AutoVern9(Rodas5())`. 
- -(This is a simplified version of the default algorithm chooser) - -## Comparison to other Software - -If you are familiar with MATLAB, SciPy, or R's DESolve, here's a quick translation start to have transfer your knowledge over. - -- `ode23` -> `BS3()` -- `ode45`/`dopri5` -> `DP5()`, though in most cases `Tsit5()` is more efficient -- `ode23s` -> `Rosenbrock23()`, though in most cases `Rodas4()` is more efficient -- `ode113` -> `VCABM()`, though in many cases `Vern7()` is more efficient -- `dop853` -> `DP8()`, though in most cases `Vern7()` is more efficient -- `ode15s`/`vode` -> `QNDF()`, though in many cases `CVODE_BDF()`, `Rodas4()` - or `radau()` are more efficient -- `ode23t` -> `Trapezoid()` for efficiency and `GenericTrapezoid()` for robustness -- `ode23tb` -> `TRBDF2` -- `lsoda` -> `lsoda()` (requires `]add LSODA; using LSODA`) -- `ode15i` -> `IDA()`, though in many cases `Rodas4()` can handle the DAE and is - significantly more efficient - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/03-optimizing_diffeq_code.jmd b/tutorials/introduction/03-optimizing_diffeq_code.jmd deleted file mode 100644 index 656d37ec..00000000 --- a/tutorials/introduction/03-optimizing_diffeq_code.jmd +++ /dev/null @@ -1,492 +0,0 @@ ---- -title: Optimizing DiffEq Code -author: Chris Rackauckas ---- - -In this notebook we will walk through some of the main tools for optimizing your code in order to efficiently solve DifferentialEquations.jl. User-side optimizations are important because, for sufficiently difficult problems, most of the time will be spent inside of your `f` function, the function you are trying to solve. "Efficient" integrators are those that reduce the required number of `f` calls to hit the error tolerance. 
The main ideas for optimizing your DiffEq code, or any Julia function, are the following: - -- Make it non-allocating -- Use StaticArrays for small arrays -- Use broadcast fusion -- Make it type-stable -- Reduce redundant calculations -- Make use of BLAS calls -- Optimize algorithm choice - -We'll discuss these strategies in the context of small and large systems. Let's start with small systems. - -## Optimizing Small Systems (<100 DEs) - -Let's take the classic Lorenz system from before. Let's start by naively writing the system in its out-of-place form: - -```julia -function lorenz(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - [dx,dy,dz] -end -``` - -Here, `lorenz` returns an object, `[dx,dy,dz]`, which is created within the body of `lorenz`. - -This is a common code pattern from high-level languages like MATLAB, SciPy, or R's deSolve. However, the issue with this form is that it allocates a vector, `[dx,dy,dz]`, at each step. Let's benchmark the solution process with this choice of function: - -```julia -using DifferentialEquations, BenchmarkTools -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -The BenchmarkTools package's `@benchmark` runs the code multiple times to get an accurate measurement. The minimum time is the time it takes when your OS and other background processes aren't getting in the way. Notice that in this case it takes about 5ms to solve and allocates around 11.11 MiB. However, if we were to use this inside of a real user code we'd see a lot of time spent doing garbage collection (GC) to clean up all of the arrays we made. Even if we turn off saving we have these allocations. - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -The problem of course is that arrays are created every time our derivative function is called. 
This function is called multiple times per step and is thus the main source of memory usage. To fix this, we can use the in-place form to ***make our code non-allocating***: - -```julia -function lorenz!(du,u,p,t) - du[1] = 10.0*(u[2]-u[1]) - du[2] = u[1]*(28.0-u[3]) - u[2] - du[3] = u[1]*u[2] - (8/3)*u[3] -end -``` - -Here, instead of creating an array each time, we utilized the cache array `du`. When the inplace form is used, DifferentialEquations.jl takes a different internal route that minimizes the internal allocations as well. When we benchmark this function, we will see quite a difference. - -```julia -u0 = [1.0;0.0;0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -There is a 4x time difference just from that change! Notice there are still some allocations and this is due to the construction of the integration cache. But this doesn't scale with the problem size: - -```julia -tspan = (0.0,500.0) # 5x longer than before -prob = ODEProblem(lorenz!,u0,tspan) -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -since that's all just setup allocations. - -#### But if the system is small we can optimize even more. - -Allocations are only expensive if they are "heap allocations". For a more in-depth definition of heap allocations, [there are a lot of sources online](http://net-informations.com/faq/net/stack-heap.htm). But a good working definition is that heap allocations are variable-sized slabs of memory which have to be pointed to, and this pointer indirection costs time. Additionally, the heap has to be managed and the garbage controllers has to actively keep track of what's on the heap. - -However, there's an alternative to heap allocations, known as stack allocations. The stack is statically-sized (known at compile time) and thus its accesses are quick. 
Additionally, the exact block of memory is known in advance by the compiler, and thus re-using the memory is cheap. This means that allocating on the stack has essentially no cost! - -Arrays have to be heap allocated because their size (and thus the amount of memory they take up) is determined at runtime. But there are structures in Julia which are stack-allocated. `struct`s for example are stack-allocated "value-type"s. `Tuple`s are a stack-allocated collection. The most useful data structure for DiffEq though is the `StaticArray` from the package [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl). These arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example: - -```julia -using StaticArrays -A = @SVector [2.0,3.0,5.0] -``` - -Notice that the `3` after `SVector` gives the size of the `SVector`. It cannot be changed. Additionally, `SVector`s are immutable, so we have to create a new `SVector` to change values. But remember, we don't have to worry about allocations because this data structure is stack-allocated. `SArray`s have a lot of extra optimizations as well: they have fast matrix multiplication, fast QR factorizations, etc. which directly make use of the information about the size of the array. Thus, when possible they should be used. - -Unfortunately static arrays can only be used for sufficiently small arrays. After a certain size, they are forced to heap allocate after some instructions and their compile time balloons. Thus static arrays shouldn't be used if your system has more than 100 variables. Additionally, only the native Julia algorithms can fully utilize static arrays. - -Let's ***optimize `lorenz` using static arrays***. 
Note that in this case, we want to use the out-of-place allocating form, but this time we want to output a static array: - -```julia -function lorenz_static(u,p,t) - dx = 10.0*(u[2]-u[1]) - dy = u[1]*(28.0-u[3]) - u[2] - dz = u[1]*u[2] - (8/3)*u[3] - @SVector [dx,dy,dz] -end -``` - -To make the solver internally use static arrays, we simply give it a static array as the initial condition: - -```julia -u0 = @SVector [1.0,0.0,0.0] -tspan = (0.0,100.0) -prob = ODEProblem(lorenz_static,u0,tspan) -@benchmark solve(prob,Tsit5()) -``` - -```julia -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -And that's pretty much all there is to it. With static arrays you don't have to worry about allocating, so use operations like `*` and don't worry about fusing operations (discussed in the next section). Do "the vectorized code" of R/MATLAB/Python and your code in this case will be fast, or directly use the numbers/values. - -#### Exercise 1 - -Implement the out-of-place array, in-place array, and out-of-place static array forms for the [Henon-Heiles System](https://en.wikipedia.org/wiki/H%C3%A9non%E2%80%93Heiles_system) and time the results. - -## Optimizing Large Systems - -### Interlude: Managing Allocations with Broadcast Fusion - -When your system is sufficiently large, or you have to make use of a non-native Julia algorithm, you have to make use of `Array`s. In order to use arrays in the most efficient manner, you need to be careful about temporary allocations. Vectorized calculations naturally have plenty of temporary array allocations. This is because a vectorized calculation outputs a vector. Thus: - -```julia -A = rand(1000,1000); B = rand(1000,1000); C = rand(1000,1000) -test(A,B,C) = A + B + C -@benchmark test(A,B,C) -``` -That expression `A + B + C` creates 2 arrays. It first creates one for the output of `A + B`, then uses that result array to `+ C` to get the final result. 2 arrays! We don't want that! 
The first thing to do to fix this is to use broadcast fusion. [Broadcast fusion](https://julialang.org/blog/2017/01/moredots) puts expressions together. For example, instead of doing the `+` operations separately, if we were to add them all at the same time, then we would only have a single array that's created. For example: - -```julia -test2(A,B,C) = map((a,b,c)->a+b+c,A,B,C) -@benchmark test2(A,B,C) -``` - -Puts the whole expression into a single function call, and thus only one array is required to store output. This is the same as writing the loop: - -```julia -function test3(A,B,C) - D = similar(A) - @inbounds for i in eachindex(A) - D[i] = A[i] + B[i] + C[i] - end - D -end -@benchmark test3(A,B,C) -``` - -However, Julia's broadcast is syntactic sugar for this. If multiple expressions have a `.`, then it will put those vectorized operations together. Thus: - -```julia -test4(A,B,C) = A .+ B .+ C -@benchmark test4(A,B,C) -``` - -is a version with only 1 array created (the output). Note that `.`s can be used with function calls as well: - -```julia -sin.(A) .+ sin.(B) -``` - -Also, the `@.` macro applys a dot to every operator: - -```julia -test5(A,B,C) = @. A + B + C #only one array allocated -@benchmark test5(A,B,C) -``` - -Using these tools we can get rid of our intermediate array allocations for many vectorized function calls. But we are still allocating the output array. To get rid of that allocation, we can instead use mutation. Mutating broadcast is done via `.=`. For example, if we pre-allocate the output: - -```julia -D = zeros(1000,1000); -``` - -Then we can keep re-using this cache for subsequent calculations. The mutating broadcasting form is: - -```julia -test6!(D,A,B,C) = D .= A .+ B .+ C #only one array allocated -@benchmark test6!(D,A,B,C) -``` - -If we use `@.` before the `=`, then it will turn it into `.=`: - -```julia -test7!(D,A,B,C) = @. 
D = A + B + C #only one array allocated -@benchmark test7!(D,A,B,C) -``` - -Notice that in this case, there is no "output", and instead the values inside of `D` are what are changed (like with the DiffEq inplace function). Many Julia functions have a mutating form which is denoted with a `!`. For example, the mutating form of the `map` is `map!`: - -```julia -test8!(D,A,B,C) = map!((a,b,c)->a+b+c,D,A,B,C) -@benchmark test8!(D,A,B,C) -``` - -Some operations require using an alternate mutating form in order to be fast. For example, matrix multiplication via `*` allocates a temporary: - -```julia -@benchmark A*B -``` - -Instead, we can use the mutating form `mul!` into a cache array to avoid allocating the output: - -```julia -using LinearAlgebra -@benchmark mul!(D,A,B) # same as D = A * B -``` - -For repeated calculations this reduced allocation can stop GC cycles and thus lead to more efficient code. Additionally, ***we can fuse together higher level linear algebra operations using BLAS***. The package [SugarBLAS.jl](https://github.com/lopezm94/SugarBLAS.jl) makes it easy to write higher level operations like `alpha*B*A + beta*C` as mutating BLAS calls. - -### Example Optimization: Gierer-Meinhardt Reaction-Diffusion PDE Discretization - -Let's optimize the solution of a Reaction-Diffusion PDE's discretization. In its discretized form, this is the ODE: - -$$ -\begin{align} -du &= D_1 (A_y u + u A_x) + \frac{au^2}{v} + \bar{u} - \alpha u\\ -dv &= D_2 (A_y v + v A_x) + a u^2 + \beta v -\end{align} -$$ - -where $u$, $v$, and $A$ are matrices. Here, we will use the simplified version where $A$ is the tridiagonal stencil $[1,-2,1]$, i.e. it's the 2D discretization of the LaPlacian. 
The native code would be something along the lines of: - -```julia -# Generate the constants -p = (1.0,1.0,1.0,10.0,0.001,100.0) # a,α,ubar,β,D1,D2 -N = 100 -Ax = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1])) -Ay = copy(Ax) -Ax[2,1] = 2.0 -Ax[end-1,end] = 2.0 -Ay[1,2] = 2.0 -Ay[end,end-1] = 2.0 - -function basic_version!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = r[:,:,1] - v = r[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - dr[:,:,1] = Du .+ a.*u.*u./v .+ ubar .- α*u - dr[:,:,2] = Dv .+ a.*u.*u .- β*v -end - -a,α,ubar,β,D1,D2 = p -uss = (ubar+β)/α -vss = (a/β)*uss^2 -r0 = zeros(100,100,2) -r0[:,:,1] .= uss.+0.1.*rand.() -r0[:,:,2] .= vss - -prob = ODEProblem(basic_version!,r0,(0.0,0.1),p) -``` - -In this version we have encoded our initial condition to be a 3-dimensional array, with `u[:,:,1]` being the `A` part and `u[:,:,2]` being the `B` part. - -```julia -@benchmark solve(prob,Tsit5()) -``` - -While this version isn't very efficient, - -#### We recommend writing the "high-level" code first, and iteratively optimizing it! - -The first thing that we can do is get rid of the slicing allocations. The operation `r[:,:,1]` creates a temporary array instead of a "view", i.e. a pointer to the already existing memory. To make it a view, add `@view`. Note that we have to be careful with views because they point to the same memory, and thus changing a view changes the original values: - -```julia -A = rand(4) -@show A -B = @view A[1:3] -B[2] = 2 -@show A -``` - -Notice that changing `B` changed `A`. This is something to be careful of, but at the same time we want to use this since we want to modify the output `dr`. Additionally, the last statement is a purely element-wise operation, and thus we can make use of broadcast fusion there. 
Let's rewrite `basic_version!` to ***avoid slicing allocations*** and to ***use broadcast fusion***: - -```julia -function gm2!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - Du = D1*(Ay*u + u*Ax) - Dv = D2*(Ay*v + v*Ax) - @. du = Du + a.*u.*u./v + ubar - α*u - @. dv = Dv + a.*u.*u - β*v -end -prob = ODEProblem(gm2!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -Now, most of the allocations are taking place in `Du = D1*(Ay*u + u*Ax)` since those operations are vectorized and not mutating. We should instead replace the matrix multiplications with `mul!`. When doing so, we will need to have cache variables to write into. This looks like: - -```julia -Ayu = zeros(N,N) -uAx = zeros(N,N) -Du = zeros(N,N) -Ayv = zeros(N,N) -vAx = zeros(N,N) -Dv = zeros(N,N) -function gm3!(dr,r,p,t) - a,α,ubar,β,D1,D2 = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm3!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -But our temporary variables are global variables. We need to either declare the caches as `const` or localize them. We can localize them by adding them to the parameters, `p`. It's easier for the compiler to reason about local variables than global variables. ***Localizing variables helps to ensure type stability***. - -```julia -p = (1.0,1.0,1.0,10.0,0.001,100.0,Ayu,uAx,Du,Ayv,vAx,Dv) # a,α,ubar,β,D1,D2 -function gm4!(dr,r,p,t) - a,α,ubar,β,D1,D2,Ayu,uAx,Du,Ayv,vAx,Dv = p - u = @view r[:,:,1] - v = @view r[:,:,2] - du = @view dr[:,:,1] - dv = @view dr[:,:,2] - mul!(Ayu,Ay,u) - mul!(uAx,u,Ax) - mul!(Ayv,Ay,v) - mul!(vAx,v,Ax) - @. Du = D1*(Ayu + uAx) - @. Dv = D2*(Ayv + vAx) - @. du = Du + a*u*u./v + ubar - α*u - @. 
dv = Dv + a*u*u - β*v -end -prob = ODEProblem(gm4!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -We could then use the BLAS `gemmv` to optimize the matrix multiplications some more, but instead let's devectorize the stencil. - -```julia -p = (1.0,1.0,1.0,10.0,0.001,100.0,N) -function fast_gm!(du,u,p,t) - a,α,ubar,β,D1,D2,N = p - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - - @inbounds for j in 2:N-1, i in 2:N-1 - du[i,j,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for j in 2:N-1 - i = 1 - du[1,j,1] = D1*(2u[i+1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = 1 - du[1,j,2] = D2*(2u[i+1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,1] = D1*(2u[i-1,j,1] + u[i,j+1,1] + u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for j in 2:N-1 - i = N - du[end,j,2] = D2*(2u[i-1,j,2] + u[i,j+1,2] + u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds for i in 2:N-1 - j = 1 - du[i,1,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = 1 - du[i,1,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,1] = D1*(u[i-1,j,1] + u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - end - @inbounds for i in 2:N-1 - j = N - du[i,end,2] = D2*(u[i-1,j,2] + u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end - - @inbounds begin - i = 1; j = 1 - du[1,1,1] = D1*(2u[i+1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - 
α*u[i,j,1] - du[1,1,2] = D2*(2u[i+1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = 1; j = N - du[1,N,1] = D1*(2u[i+1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[1,N,2] = D2*(2u[i+1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = 1 - du[N,1,1] = D1*(2u[i-1,j,1] + 2u[i,j+1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[N,1,2] = D2*(2u[i-1,j,2] + 2u[i,j+1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - - i = N; j = N - du[end,end,1] = D1*(2u[i-1,j,1] + 2u[i,j-1,1] - 4u[i,j,1]) + - a*u[i,j,1]^2/u[i,j,2] + ubar - α*u[i,j,1] - du[end,end,2] = D2*(2u[i-1,j,2] + 2u[i,j-1,2] - 4u[i,j,2]) + - a*u[i,j,1]^2 - β*u[i,j,2] - end -end -prob = ODEProblem(fast_gm!,r0,(0.0,0.1),p) -@benchmark solve(prob,Tsit5()) -``` - -Lastly, we can do other things like multithread the main loops, but these optimizations get the last 2x-3x out. The main optimizations which apply everywhere are the ones we just performed (though the last one only works if your matrix is a stencil. This is known as a matrix-free implementation of the PDE discretization). - -This gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code! - -The last thing to do is then ***optimize our algorithm choice***. We have been using `Tsit5()` as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use `CVODE_BDF()`. However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. 
`CVODE_BDF` allows us to use a sparse Newton-Krylov solver by setting `linear_solver = :GMRES` (see [the solver documentation](https://docs.sciml.ai/dev/solvers/ode_solve/#ode_solve_sundials-1), and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time. - -```julia -prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p) -@benchmark solve(prob,Tsit5()) -``` - -```julia -using Sundials -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) -``` - -```julia -prob = ODEProblem(fast_gm!,r0,(0.0,100.0),p) -# Will go out of memory if we don't turn off `save_everystep`! -@benchmark solve(prob,Tsit5(),save_everystep=false) -``` - -```julia -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES)) -``` - -Now let's check the allocation growth. - -```julia -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) -``` - -```julia -prob = ODEProblem(fast_gm!,r0,(0.0,500.0),p) -@benchmark solve(prob,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) -``` - -Notice that we've elimated almost all allocations, allowing the code to grow without hitting garbage collection and slowing down. - -Why is `CVODE_BDF` doing well? What's happening is that, because the problem is stiff, the number of steps required by the explicit Runge-Kutta method grows rapidly, whereas `CVODE_BDF` is taking large steps. Additionally, the `GMRES` linear solver form is quite an efficient way to solve the implicit system in this case. This is problem-dependent, and in many cases using a Krylov method effectively requires a preconditioner, so you need to play around with testing other algorithms and linear solvers to find out what works best with your problem. - -## Conclusion - -Julia gives you the tools to optimize the solver "all the way", but you need to make use of it. The main thing to avoid is temporary allocations. For small systems, this is effectively done via static arrays. 
For large systems, this is done via in-place operations and cache arrays. Either way, the resulting solution can be immensely sped up over vectorized formulations by using these principles. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/04-callbacks_and_events.jmd b/tutorials/introduction/04-callbacks_and_events.jmd deleted file mode 100644 index d121b0b1..00000000 --- a/tutorials/introduction/04-callbacks_and_events.jmd +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: Callbacks and Events -author: Chris Rackauckas ---- - -In working with a differential equation, our system will evolve through many states. Particular states of the system may be of interest to us, and we say that an ***"event"*** is triggered when our system reaches these states. For example, events may include the moment when our system reaches a particular temperature or velocity. We ***handle*** these events with ***callbacks***, which tell us what to do once an event has been triggered. - -These callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserve conservation laws and save the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers. - -This tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the [Event Handling and Callback Functions](https://docs.sciml.ai/dev/features/callback_functions/) page of the documentation. We will also introduce you to some of the most widely used callbacks in the [Callback Library](https://docs.sciml.ai/dev/features/callback_library/), which is a library of pre-built mods. - -## Events and Continuous Callbacks - -Event handling is done through continuous callbacks. 
Callbacks take a function, `condition`, which triggers an `affect!` when `condition == 0`. These callbacks are called "continuous" because they will utilize rootfinding on the interpolation to find the "exact" time point at which the condition takes place and apply the `affect!` at that time point. - -***Let's use a bouncing ball as a simple system to explain events and callbacks.*** Let's take Newton's model of a ball falling towards the Earth's surface via a gravitational constant `g`. In this case, the velocity is changing via `-g`, and position is changing via the velocity. Therefore we receive the system of ODEs: - -```julia -using DifferentialEquations, ParameterizedFunctions -ball! = @ode_def BallBounce begin - dy = v - dv = -g -end g -``` - -We want the callback to trigger when `y=0` since that's when the ball will hit the Earth's surface (our event). We do this with the condition: - -```julia -function condition(u,t,integrator) - u[1] -end -``` - -Recall that the `condition` will trigger when it evaluates to zero, and here it will evaluate to zero when `u[1] == 0`, which occurs when `v == 0`. *Now we have to say what we want the callback to do.* Callbacks make use of the [Integrator Interface](https://docs.sciml.ai/dev/basics/integrator/). Instead of giving a full description, a quick and usable rundown is: - -- Values are strored in `integrator.u` -- Times are stored in `integrator.t` -- The parameters are stored in `integrator.p` -- `integrator(t)` performs an interpolation in the current interval between `integrator.tprev` and `integrator.t` (and allows extrapolation) -- User-defined options (tolerances, etc.) are stored in `integrator.opts` -- `integrator.sol` is the current solution object. Note that `integrator.sol.prob` is the current problem - -While there's a lot more on the integrator interface page, that's a working knowledge of what's there. - -What we want to do with our `affect!` is to "make the ball bounce". 
Mathematically speaking, the ball bounces when the sign of the velocity flips. As an added behavior, let's also use a small friction constant to dampen the ball's velocity. This way only a percentage of the velocity will be retained when the event is triggered and the callback is used. We'll define this behavior in the `affect!` function: - -```julia -function affect!(integrator) - integrator.u[2] = -integrator.p[2] * integrator.u[2] -end -``` - -`integrator.u[2]` is the second value of our model, which is `v` or velocity, and `integrator.p[2]`, is our friction coefficient. - -Therefore `affect!` can be read as follows: `affect!` will take the current value of velocity, and multiply it `-1` multiplied by our friction coefficient. Therefore the ball will change direction and its velocity will dampen when `affect!` is called. - -Now let's build the `ContinuousCallback`: - -```julia -bounce_cb = ContinuousCallback(condition,affect!) -``` - -Now let's make an `ODEProblem` which has our callback: - -```julia -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=bounce_cb) -``` - -Notice that we chose a friction constant of `0.9`. Now we can solve the problem and plot the solution as we normally would: - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -and tada, the ball bounces! Notice that the `ContinuousCallback` is using the interpolation to apply the effect "exactly" when `v == 0`. This is crucial for model correctness, and thus when this property is needed a `ContinuousCallback` should be used. - -#### Exercise 1 - -In our example we used a constant coefficient of friction, but if we are bouncing the ball in the same place we may be smoothing the surface (say, squishing the grass), causing there to be less friction after each bounce. 
In this more advanced model, we want the friction coefficient at the next bounce to be `sqrt(friction)` from the previous bounce (since `friction < 1`, `sqrt(friction) > friction` and `sqrt(friction) < 1`). - -Hint: there are many ways to implement this. One way to do it is to make `p` a `Vector` and mutate the friction coefficient in the `affect!`. - -## Discrete Callbacks - -A discrete callback checks a `condition` after every integration step and, if true, it will apply an `affect!`. For example, let's say that at time `t=2` we want to include that a kid kicked the ball, adding `20` to the current velocity. This kind of situation, where we want to add a specific behavior which does not require rootfinding, is a good candidate for a `DiscreteCallback`. In this case, the `condition` is a boolean for whether to apply the `affect!`, so: - -```julia -function condition_kick(u,t,integrator) - t == 2 -end -``` - -We want the kick to occur at `t=2`, so we check for that time point. When we are at this time point, we want to do: - -```julia -function affect_kick!(integrator) - integrator.u[2] += 50 -end -``` - -Now we build the problem as before: - -```julia -kick_cb = DiscreteCallback(condition_kick,affect_kick!) -u0 = [50.0,0.0] -tspan = (0.0,10.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=kick_cb) -``` - -Note that, since we are requiring our effect at exactly the time `t=2`, we need to tell the integration scheme to step at exactly `t=2` to apply this callback. This is done via the option `tstops`, which is like `saveat` but means "stop at these values". - -```julia -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) -``` - -Note that this example could've been done with a `ContinuousCallback` by checking the condition `t-2`. - -## Merging Callbacks with Callback Sets - -In some cases you may want to merge callbacks to build up more complex behavior. In our previous result, notice that the model is unphysical because the ball goes below zero! 
What we really need to do is add the bounce callback together with the kick. This can be achieved through the `CallbackSet`. - -```julia -cb = CallbackSet(bounce_cb,kick_cb) -``` - -A `CallbackSet` merges their behavior together. The logic is as follows. In a given interval, if there are multiple continuous callbacks that would trigger, only the one that triggers at the earliest time is used. The time is pulled back to where that continuous callback is triggered, and then the `DiscreteCallback`s in the callback set are called in order. - -```julia -u0 = [50.0,0.0] -tspan = (0.0,15.0) -p = (9.8,0.9) -prob = ODEProblem(ball!,u0,tspan,p,callback=cb) -sol = solve(prob,Tsit5(),tstops=[2.0]) -plot(sol) -``` - -Notice that we have now merged the behaviors. We can then nest this as deep as we like. - -#### Exercise 2 - -Add to the model a linear wind with resistance that changes the acceleration to `-g + k*v` after `t=10`. Do so by adding another parameter and allowing it to be zero until a specific time point where a third callback triggers the change. - -## Integration Termination and Directional Handling - -Let's look at another model now: the model of the [Harmonic Oscillator](https://en.wikipedia.org/wiki/Harmonic_oscillator). We can write this as: - -```julia -u0 = [1.,0.] -harmonic! = @ode_def HarmonicOscillator begin - dv = -x - dx = v -end -tspan = (0.0,10.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -plot(sol) -``` - -Let's instead stop the integration when a condition is met. From the [Integrator Interface stepping controls](https://docs.sciml.ai/dev/basics/integrator/#stepping_controls-1) we see that `terminate!(integrator)` will cause the integration to end. So our new `affect!` is simply: - -```julia -function terminate_affect!(integrator) - terminate!(integrator) -end -``` - -Let's first stop the integration when the particle moves back to `x=0`. 
This means we want to use the condition: - -```julia -function terminate_condition(u,t,integrator) - u[2] -end -terminate_cb = ContinuousCallback(terminate_condition,terminate_affect!) -``` - -Note that instead of adding callbacks to the problem, we can also add them to the `solve` command. This will automatically form a `CallbackSet` with any problem-related callbacks and naturally allows you to distinguish between model features and integration controls. - -```julia -sol = solve(prob,callback=terminate_cb) -plot(sol) -``` - -Notice that the harmonic oscilator's true solution here is `sin` and `cosine`, and thus we would expect this return to zero to happen at `t=π`: - -```julia -sol.t[end] -``` - -This is one way to approximate π! Lower tolerances and arbitrary precision numbers can make this more exact, but let's not look at that. Instead, what if we wanted to halt the integration after exactly one cycle? To do so we would need to ignore the first zero-crossing. Luckily in these types of scenarios there's usually a structure to the problem that can be exploited. Here, we only want to trigger the `affect!` when crossing from positive to negative, and not when crossing from negative to positive. In other words, we want our `affect!` to only occur on upcrossings. - -If the `ContinuousCallback` constructor is given a single `affect!`, it will occur on both upcrossings and downcrossings. If there are two `affect!`s given, then the first is for upcrossings and the second is for downcrossings. An `affect!` can be ignored by using `nothing`. Together, the "upcrossing-only" version of the effect means that the first `affect!` is what we defined above and the second is `nothing`. 
Therefore we want: - -```julia -terminate_upcrossing_cb = ContinuousCallback(terminate_condition,terminate_affect!,nothing) -``` - -Which gives us: - -```julia -sol = solve(prob,callback=terminate_upcrossing_cb) -plot(sol) -``` - -## Callback Library - -As you can see, callbacks can be very useful and through `CallbackSets` we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the [Callback Library](http://docs.sciml.ai/dev/features/callback_library/). We will walk through a few examples where these callbacks can come in handy. - -### Manifold Projection - -One callback is the manifold projection callback. Essentially, you can define any manifold `g(sol)=0` which the solution must live on, and cause the integration to project to that manifold after every step. As an example, let's see what happens if we naively run the harmonic oscillator for a long time: - -```julia -tspan = (0.0,10000.0) -prob = ODEProblem(harmonic!,u0,tspan) -sol = solve(prob) -gr(fmt=:png) # Make it a PNG instead of an SVG since there's a lot of points! -plot(sol,vars=(1,2)) -``` - -```julia -plot(sol,vars=(0,1),denseplot=false) -``` - -Notice that what's going on is that the numerical solution is drifting from the true solution over this long time scale. This is because the integrator is not conserving energy. - -```julia -plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2 -``` - -Some integration techniques like [symplectic integrators](https://docs.sciml.ai/dev/solvers/dynamical_solve/#Symplectic-Integrators-1) are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. 
To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is: - -```julia -function g(resid,u,p,t) - resid[1] = u[2]^2 + u[1]^2 - 1 - resid[2] = 0 -end -``` - -Here the residual measures how far from our desired energy we are, and the number of conditions matches the size of our system (we ignored the second one by making the residual 0). Thus we define a `ManifoldProjection` callback and add that to the solver: - -```julia -cb = ManifoldProjection(g) -sol = solve(prob,callback=cb) -plot(sol,vars=(1,2)) -``` - -```julia -plot(sol,vars=(0,1),denseplot=false) -``` - -Now we have "perfect" energy conservation, where if it's ever violated too much the solution will get projected back to `energy=1`. - -```julia -u1,u2 = sol[500] -u2^2 + u1^2 -``` - -While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the [`PositiveCallback()`](https://docs.sciml.ai/dev/features/callback_library/#PositiveDomain-1) which can be used to enforce positivity of the variables. - -### SavingCallback - -The `SavingCallback` can be used to allow for special saving behavior. Let's take a linear ODE define on a system of 1000x1000 matrices: - -```julia -prob = ODEProblem((du,u,p,t)->du.=u,rand(1000,1000),(0.0,1.0)) -``` - -In fields like quantum mechanics you may only want to know specific properties of the solution such as the trace or the norm of the matrix. Saving all of the 1000x1000 matrices can be a costly way to get this information! Instead, we can use the `SavingCallback` to save the `trace` and `norm` at specified times. To do so, we first define our `SavedValues` cache. 
Our time is in terms of `Float64`, and we want to save tuples of `Float64`s (one for the `trace` and one for the `norm`), and thus we generate the cache as: - -```julia -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) -``` - -Now we define the `SavingCallback` by giving it a function of `(u,p,t,integrator)` that returns the values to save, and the cache: - -```julia -using LinearAlgebra -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values) -``` - -Here we take `u` and save `(tr(u),norm(u))`. When we solve with this callback: - -```julia -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving -``` - -Our values are stored in our `saved_values` variable: - -```julia -saved_values.t -``` - -```julia -saved_values.saveval -``` - -By default this happened only at the solver's steps. But the `SavingCallback` has similar controls as the integrator. For example, if we want to save at every `0.1` seconds, we do can so using `saveat`: - -```julia -saved_values = SavedValues(Float64, Tuple{Float64,Float64}) # New cache -cb = SavingCallback((u,t,integrator)->(tr(u),norm(u)), saved_values, saveat = 0.0:0.1:1.0) -sol = solve(prob, Tsit5(), callback=cb, save_everystep=false, save_start=false, save_end = false) # Turn off normal saving -``` - -```julia -saved_values.t -``` - -```julia -saved_values.saveval -``` - -#### Exercise 3 - -Go back to the Harmonic oscillator. Use the `SavingCallback` to save an array for the energy over time, and do this both with and without the `ManifoldProjection`. Plot the results to see the difference the projection makes. 
- -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/05-formatting_plots.jmd b/tutorials/introduction/05-formatting_plots.jmd deleted file mode 100644 index 499531cc..00000000 --- a/tutorials/introduction/05-formatting_plots.jmd +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Formatting Plots -author: Chris Rackauckas ---- - -Since the plotting functionality is implemented as a recipe to Plots.jl, [all of the options open to Plots.jl can be used in our plots](https://juliaplots.github.io/supported/). In addition, there are special features specifically for [differential equation plots](https://docs.sciml.ai/dev/basics/plot/). This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling `solve` on the problem, and `plot` on the solution: - -```julia -using DifferentialEquations, Plots, ParameterizedFunctions -gr() -lorenz = @ode_def Lorenz begin - dx = σ*(y-x) - dy = ρ*x-y-x*z - dz = x*y-β*z -end σ β ρ - -p = [10.0,8/3,28] -u0 = [1., 5., 10.] -tspan = (0., 100.) -prob = ODEProblem(lorenz, u0, tspan, p) -sol = solve(prob) -``` - -```julia -plot(sol) -``` - -Now let's change it to a phase plot. As discussed in the [plot functions page](https://docs.sciml.ai/dev/basics/plot/), we can use the `vars` command to choose the variables to plot. Let's plot variable `x` vs variable `y` vs variable `z`: - -```julia -plot(sol,vars=(1, 2, 3)) -``` - -We can also choose to plot the timeseries for a single variable: - -```julia -plot(sol,vars=[:x]) -``` - -Notice that we were able to use the variable names because we had defined the problem with the macro. But in general, we can use the indices. 
The previous plots would be: - -```julia -plot(sol,vars=(1,2,3)) -plot(sol,vars=[1]) -``` - -Common options are to add titles, axis, and labels. For example: - -```julia -plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", -xaxis="Time (t)",yaxis="u(t) (in mm)",label=["X","Y","Z"]) -``` - -Notice that series recipes apply to the solution type as well. For example, we can use a scatter plot on the timeseries: - -```julia -scatter(sol,vars=[:x]) -``` - -This shows that the recipe is using the interpolation to smooth the plot. It becomes abundantly clear when we turn it off using `denseplot=false`: - -```julia -plot(sol,vars=(1,2,3),denseplot=false) -``` - -When this is done, only the values the timestep hits are plotted. Using the interpolation usually results in a much nicer looking plot so it's recommended, and since the interpolations have similar orders to the numerical methods, their results are trustworthy on the full interval. We can control the number of points used in the interpolation's plot using the `plotdensity` command: - -```julia -plot(sol,vars=(1,2,3),plotdensity=100) -``` - -That's plotting the entire solution using 100 points spaced evenly in time. - -```julia -plot(sol,vars=(1,2,3),plotdensity=10000) -``` - -That's more like it! By default it uses `100*length(sol)`, where the length is the number of internal steps it had to take. This heuristic usually does well, but unusually difficult equations it can be relaxed (since it will take small steps), and for equations with events / discontinuities raising the plot density can help resolve the discontinuity. - -Lastly notice that we can compose plots. Let's show where the 100 points are using a scatter plot: - -```julia -plot(sol,vars=(1,2,3)) -scatter!(sol,vars=(1,2,3),plotdensity=100) -``` - -We can instead work with an explicit plot object. This form can be better for building a complex plot in a loop. 
- -```julia -p = plot(sol,vars=(1,2,3)) -scatter!(p,sol,vars=(1,2,3),plotdensity=100) -title!("I added a title") -``` - -You can do all sorts of things. Have fun! - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/introduction/Project.toml b/tutorials/introduction/Project.toml deleted file mode 100644 index 8cabdc5e..00000000 --- a/tutorials/introduction/Project.toml +++ /dev/null @@ -1,17 +0,0 @@ -[deps] -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -ParameterizedFunctions = "65888b18-ceab-5e60-b2b9-181511a3b968" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -BenchmarkTools = "0.5, 0.6, 0.7, 1.0" -DifferentialEquations = "6.14" -ParameterizedFunctions = "5.3" -Plots = "1.4" -StaticArrays = "0.12, 1.0" -Sundials = "4.2" diff --git a/tutorials/model_inference/01-pendulum_bayesian_inference.jmd b/tutorials/model_inference/01-pendulum_bayesian_inference.jmd deleted file mode 100644 index 2f3f7524..00000000 --- a/tutorials/model_inference/01-pendulum_bayesian_inference.jmd +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Bayesian Inference on a Pendulum using DiffEqBayes.jl -author: Vaibhav Dixit ---- - -### Set up simple pendulum problem - -```julia -using DiffEqBayes, OrdinaryDiffEq, RecursiveArrayTools, Distributions, Plots, StatsPlots, BenchmarkTools, TransformVariables, CmdStan, DynamicHMC -``` - -Let's define our simple pendulum problem. Here our pendulum has a drag term `ω` -and a length `L`. 
- -![pendulum](https://user-images.githubusercontent.com/1814174/59942945-059c1680-942f-11e9-991c-2025e6e4ccd3.jpg) - -We get first order equations by defining the first term as the velocity and the -second term as the position, getting: - -```julia -function pendulum(du,u,p,t) - ω,L = p - x,y = u - du[1] = y - du[2] = - ω*y -(9.8/L)*sin(x) -end - -u0 = [1.0,0.1] -tspan = (0.0,10.0) -prob1 = ODEProblem(pendulum,u0,tspan,[1.0,2.5]) -``` - -### Solve the model and plot - -To understand the model and generate data, let's solve and visualize the solution -with the known parameters: - -```julia -sol = solve(prob1,Tsit5()) -plot(sol) -``` - -It's the pendulum, so you know what it looks like. It's periodic, but since we -have not made a small angle assumption it's not exactly `sin` or `cos`. Because -the true dampening parameter `ω` is 1, the solution does not decay over time, -nor does it increase. The length `L` determines the period. - -### Create some dummy data to use for estimation - -We now generate some dummy data to use for estimation - -```julia -t = collect(range(1,stop=10,length=10)) -randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)]) -data = convert(Array,randomized) -``` - -Let's see what our data looks like on top of the real solution - -```julia -scatter!(data') -``` - -This data captures the non-dampening effect and the true period, making it -perfect to attempting a Bayesian inference. - -### Perform Bayesian Estimation - -Now let's fit the pendulum to the data. Since we know our model is correct, -this should give us back the parameters that we used to generate the data! -Define priors on our parameters. 
In this case, let's assume we don't have much -information, but have a prior belief that ω is between 0.1 and 3.0, while the -length of the pendulum L is probably around 3.0: - -```julia -priors = [Uniform(0.1,3.0), Normal(3.0,1.0)] -``` - -Finally let's run the estimation routine from DiffEqBayes.jl with the Turing.jl backend to check if we indeed recover the parameters! - -```julia -bayesian_result = turing_inference(prob1,Tsit5(),t,data,priors;num_samples=10_000, - syms = [:omega,:L]) -``` - -Notice that while our guesses had the wrong means, the learned parameters converged -to the correct means, meaning that it learned good posterior distributions for the -parameters. To look at these posterior distributions on the parameters, we can -examine the chains: - -```julia -plot(bayesian_result) -``` - -As a diagnostic, we will also check the parameter chains. The chain is the MCMC -sampling process. The chain should explore parameter space and converge reasonably -well, and we should be taking a lot of samples after it converges (it is these -samples that form the posterior distribution!) - -```julia -plot(bayesian_result, colordim = :parameter) -``` - -Notice that after awhile these chains converge to a "fuzzy line", meaning it -found the area with the most likelihood and then starts to sample around there, -which builds a posterior distribution around the true mean. - -DiffEqBayes.jl allows the choice of using Stan.jl, Turing.jl and DynamicHMC.jl for MCMC, you can also use ApproxBayes.jl for Approximate Bayesian computation algorithms. -Let's compare the timings across the different MCMC backends. We'll stick with the default arguments and 10,000 samples in each since there is a lot of room for micro-optimization -specific to each package and algorithm combinations, you might want to do your own experiments for specific problems to get better understanding of the performance. 
- -```julia -@btime bayesian_result = turing_inference(prob1,Tsit5(),t,data,priors;syms = [:omega,:L],num_samples=10_000) -``` - -```julia -@btime bayesian_result = stan_inference(prob1,t,data,priors;num_samples=10_000,printsummary=false) -``` - -```julia -@btime bayesian_result = dynamichmc_inference(prob1,Tsit5(),t,data,priors;num_samples = 10_000) -``` diff --git a/tutorials/model_inference/02-monte_carlo_parameter_estim.jmd b/tutorials/model_inference/02-monte_carlo_parameter_estim.jmd deleted file mode 100644 index 0f0d5fee..00000000 --- a/tutorials/model_inference/02-monte_carlo_parameter_estim.jmd +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Monte Carlo Parameter Estimation From Data -author: Chris Rackauckas ---- - -First you want to create a problem which solves multiple problems at the same time. This is the Monte Carlo Problem. When the parameter estimation tools say it will take any DEProblem, it really means ANY DEProblem! - -So, let's get a Monte Carlo problem setup that solves with 10 different initial conditions. - -```julia -using DifferentialEquations, DiffEqParamEstim, Plots, Optim - -# Monte Carlo Problem Set Up for solving set of ODEs with different initial conditions - -# Set up Lotka-Volterra system -function pf_func(du,u,p,t) - du[1] = p[1] * u[1] - p[2] * u[1]*u[2] - du[2] = -3 * u[2] + u[1]*u[2] -end -p = [1.5,1.0] -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),p) -``` - -Now for a MonteCarloProblem we have to take this problem and tell it what to do N times via the prob_func. 
So let's generate N=10 different initial conditions, and tell it to run the same problem but with these 10 different initial conditions each time: - -```julia -# Setting up to solve the problem N times (for the N different initial conditions) -N = 10; -initial_conditions = [[1.0,1.0], [1.0,1.5], [1.5,1.0], [1.5,1.5], [0.5,1.0], [1.0,0.5], [0.5,0.5], [2.0,1.0], [1.0,2.0], [2.0,2.0]] -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -``` - -We can check this does what we want by solving it: - -```julia -# Check above does what we want -sim = solve(monte_prob,Tsit5(),num_monte=N) -plot(sim) -``` - -num_monte=N means "run N times", and each time it runs the problem returned by the prob_func, which is always the same problem but with the ith initial condition. - -Now let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array. - -```julia -# Generate a dataset from these runs -data_times = 0.0:0.1:10.0 -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -data = Array(sim) -``` - -Here, data[i,j,k] is the same as sim[i,j,k] which is the same as sim[k][i,j] (where sim[k] is the kth solution). So data[i,j,k] is the jth timepoint of the ith variable in the kth trajectory. - -Now let's build a loss function. A loss function is some loss(sol) that spits out a scalar for how far from optimal we are. In the documentation I show that we normally do loss = L2Loss(t,data), but we can bootstrap off of this. Instead lets build an array of N loss functions, each one with the correct piece of data. - -```julia -# Building a loss function -losses = [L2Loss(data_times,data[:,:,i]) for i in 1:N] -``` - -So losses[i] is a function which computes the loss of a solution against the data of the ith trajectory. 
So to build our true loss function, we sum the losses: - -```julia -loss(sim) = sum(losses[i](sim[i]) for i in 1:N) -``` - -As a double check, make sure that loss(sim) outputs zero (since we generated the data from sim). Now we generate data with other parameters: - -```julia -prob = ODEProblem(pf_func,[1.0,1.0],(0.0,10.0),[1.2,0.8]) -function prob_func(prob,i,repeat) - ODEProblem(prob.f,initial_conditions[i],prob.tspan,prob.p) -end -monte_prob = MonteCarloProblem(prob,prob_func=prob_func) -sim = solve(monte_prob,Tsit5(),num_monte=N,saveat=data_times) -loss(sim) -``` - -and get a non-zero loss. So we now have our problem, our data, and our loss function... we have what we need. - -Put this into build_loss_objective. - -```julia -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - saveat=data_times) -``` - -Notice that I added the kwargs for solve into this. They get passed to an internal solve command, so then the loss is computed on N trajectories at data_times. - -Thus we take this objective function over to any optimization package. I like to do quick things in Optim.jl. Here, since the Lotka-Volterra equation requires positive parameters, I use Fminbox to make sure the parameters stay positive. I start the optimization with [1.3,0.9], and Optim spits out that the true parameters are: - -```julia -lower = zeros(2) -upper = fill(2.0,2) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) -``` - -```julia -result -``` - -Optim finds one but not the other parameter. - -I would run a test on synthetic data for your problem before using it on real data. Maybe play around with different optimization packages, or add regularization. 
You may also want to decrease the tolerance of the ODE solvers via - -```julia -obj = build_loss_objective(monte_prob,Tsit5(),loss,num_monte=N, - abstol=1e-8,reltol=1e-8, - saveat=data_times) -result = optimize(obj, lower, upper, [1.3,0.9], Fminbox(BFGS())) -``` - -```julia -result -``` - -if you suspect error is the problem. However, if you're having problems it's most likely not the ODE solver tolerance and mostly because parameter inference is a very hard optimization problem. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/model_inference/Project.toml b/tutorials/model_inference/Project.toml deleted file mode 100644 index 04690720..00000000 --- a/tutorials/model_inference/Project.toml +++ /dev/null @@ -1,30 +0,0 @@ -[deps] -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -CmdStan = "593b3428-ca2f-500c-ae53-031589ec8ddd" -DiffEqBayes = "ebbdde9d-f333-5424-9be2-dbf1e9acfb5e" -DiffEqParamEstim = "1130ab10-4a5a-5621-a13d-e4788d82bd4c" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" -DynamicHMC = "bbc10e6e-7c05-544b-b16e-64fede858acb" -Optim = "429524aa-4258-5aef-a3af-852621145aeb" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" -StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" -TransformVariables = "84d833dd-6860-57f9-a1a7-6da5db126cff" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -BenchmarkTools = "0.5, 0.6, 0.7, 1.0" -CmdStan = "6.0" -DiffEqBayes = "2.15" -DiffEqParamEstim = "1.15" -DifferentialEquations = "6.14" -Distributions = "0.23, 0.24, 0.25" -DynamicHMC = "2.1" -Optim = "0.21, 0.22, 1.0" -OrdinaryDiffEq = "5.41" -Plots = "1.4" -RecursiveArrayTools = "2.5" -StatsPlots = "0.14" -TransformVariables = "0.3" diff --git 
a/tutorials/models/01-classical_physics.jmd b/tutorials/models/01-classical_physics.jmd deleted file mode 100644 index b828488f..00000000 --- a/tutorials/models/01-classical_physics.jmd +++ /dev/null @@ -1,411 +0,0 @@ ---- -title: Classical Physics Models -author: Yingbo Ma, Chris Rackauckas ---- - -If you're getting some cold feet to jump in to DiffEq land, here are some handcrafted differential equations mini problems to hold your hand along the beginning of your journey. - -## First order linear ODE - -#### Radioactive Decay of Carbon-14 - -$$f(t,u) = \frac{du}{dt}$$ - -The Radioactive decay problem is the first order linear ODE problem of an exponential with a negative coefficient, which represents the half-life of the process in question. Should the coefficient be positive, this would represent a population growth equation. - -```julia -using OrdinaryDiffEq, Plots -gr() - -#Half-life of Carbon-14 is 5,730 years. -C₁ = 5.730 - -#Setup -u₀ = 1.0 -tspan = (0.0, 1.0) - -#Define the problem -radioactivedecay(u,p,t) = -C₁*u - -#Pass to solver -prob = ODEProblem(radioactivedecay,u₀,tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Carbon-14 half-life", xaxis = "Time in thousands of years", yaxis = "Percentage left", label = "Numerical Solution") -plot!(sol.t, t->exp(-C₁*t),lw=3,ls=:dash,label="Analytical Solution") -``` - -## Second Order Linear ODE - -#### Simple Harmonic Oscillator - -Another classical example is the harmonic oscillator, given by -$$ -\ddot{x} + \omega^2 x = 0 -$$ -with the known analytical solution -$$ -\begin{align*} -x(t) &= A\cos(\omega t - \phi) \\ -v(t) &= -A\omega\sin(\omega t - \phi), -\end{align*} -$$ -where -$$ -A = \sqrt{c_1 + c_2} \qquad\text{and}\qquad \tan \phi = \frac{c_2}{c_1} -$$ -with $c_1, c_2$ constants determined by the initial conditions such that -$c_1$ is the initial position and $\omega c_2$ is the initial velocity. 
- -Instead of transforming this to a system of ODEs to solve with `ODEProblem`, -we can use `SecondOrderODEProblem` as follows. - -```julia -# Simple Harmonic Oscillator Problem -using OrdinaryDiffEq, Plots - -#Parameters -ω = 1 - -#Initial Conditions -x₀ = [0.0] -dx₀ = [π/2] -tspan = (0.0, 2π) - -ϕ = atan((dx₀[1]/ω)/x₀[1]) -A = √(x₀[1]^2 + dx₀[1]^2) - -#Define the problem -function harmonicoscillator(ddu,du,u,ω,t) - ddu .= -ω^2 * u -end - -#Pass to solvers -prob = SecondOrderODEProblem(harmonicoscillator, dx₀, x₀, tspan, ω) -sol = solve(prob, DPRKN6()) - -#Plot -plot(sol, vars=[2,1], linewidth=2, title ="Simple Harmonic Oscillator", xaxis = "Time", yaxis = "Elongation", label = ["x" "dx"]) -plot!(t->A*cos(ω*t-ϕ), lw=3, ls=:dash, label="Analytical Solution x") -plot!(t->-A*ω*sin(ω*t-ϕ), lw=3, ls=:dash, label="Analytical Solution dx") -``` - -Note that the order of the variables (and initial conditions) is `dx`, `x`. -Thus, if we want the first series to be `x`, we have to flip the order with `vars=[2,1]`. - -## Second Order Non-linear ODE - -#### Simple Pendulum - -We will start by solving the pendulum problem. In the physics class, we often solve this problem by small angle approximation, i.e. $ sin(\theta) \approx \theta$, because otherwise, we get an elliptic integral which doesn't have an analytic solution. The linearized form is - -$$\ddot{\theta} + \frac{g}{L}{\theta} = 0$$ - -But we have numerical ODE solvers! Why not solve the *real* pendulum? - -$$\ddot{\theta} + \frac{g}{L}{\sin(\theta)} = 0$$ - -Notice that now we have a second order ODE. -In order to use the same method as above, we nee to transform it into a system -of first order ODEs by employing the notation $d\theta = \dot{\theta}$. 
- -$$ -\begin{align*} -&\dot{\theta} = d{\theta} \\ -&\dot{d\theta} = - \frac{g}{L}{\sin(\theta)} -\end{align*} -$$ - -```julia -# Simple Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants -const g = 9.81 -L = 1.0 - -#Initial Conditions -u₀ = [0,π/2] -tspan = (0.0,6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob,Tsit5()) - -#Plot -plot(sol,linewidth=2,title ="Simple Pendulum Problem", xaxis = "Time", yaxis = "Height", label = ["\\theta" "d\\theta"]) -``` - -So now we know that behaviour of the position versus time. However, it will be useful to us to look at the phase space of the pendulum, i.e., and representation of all possible states of the system in question (the pendulum) by looking at its velocity and position. Phase space analysis is ubiquitous in the analysis of dynamical systems, and thus we will provide a few facilities for it. - -```julia -p = plot(sol,vars = (1,2), xlims = (-9,9), title = "Phase Space Plot", xaxis = "Velocity", yaxis = "Position", leg=false) -function phase_plot(prob, u0, p, tspan=2pi) - _prob = ODEProblem(prob.f,u0,(0.0,tspan)) - sol = solve(_prob,Vern9()) # Use Vern9 solver for higher accuracy - plot!(p,sol,vars = (1,2), xlims = nothing, ylims = nothing) -end -for i in -4pi:pi/2:4π - for j in -4pi:pi/2:4π - phase_plot(prob, [j,i], p) - end -end -plot(p,xlims = (-9,9)) -``` - -#### Double Pendulum - -A more complicated example is given by the double pendulum. 
The equations governing -its motion are given by the following (taken from this [StackOverflow question](https://mathematica.stackexchange.com/questions/40122/help-to-plot-poincar%C3%A9-section-for-double-pendulum)) - -$$\frac{d}{dt} -\begin{pmatrix} -\alpha \\ l_\alpha \\ \beta \\ l_\beta -\end{pmatrix}= -\begin{pmatrix} -2\frac{l_\alpha - (1+\cos\beta)l_\beta}{3-\cos 2\beta} \\ --2\sin\alpha - \sin(\alpha + \beta) \\ -2\frac{-(1+\cos\beta)l_\alpha + (3+2\cos\beta)l_\beta}{3-\cos2\beta}\\ --\sin(\alpha+\beta) - 2\sin(\beta)\frac{(l_\alpha-l_\beta)l_\beta}{3-\cos2\beta} + 2\sin(2\beta)\frac{l_\alpha^2-2(1+\cos\beta)l_\alpha l_\beta + (3+2\cos\beta)l_\beta^2}{(3-\cos2\beta)^2} -\end{pmatrix}$$ - -```julia -#Double Pendulum Problem -using OrdinaryDiffEq, Plots - -#Constants and setup -const m₁, m₂, L₁, L₂ = 1, 2, 1, 2 -initial = [0, π/3, 0, 3pi/5] -tspan = (0.,50.) - -#Convenience function for transforming from polar to Cartesian coordinates -function polar2cart(sol;dt=0.02,l1=L₁,l2=L₂,vars=(2,4)) - u = sol.t[1]:dt:sol.t[end] - - p1 = l1*map(x->x[vars[1]], sol.(u)) - p2 = l2*map(y->y[vars[2]], sol.(u)) - - x1 = l1*sin.(p1) - y1 = l1*-cos.(p1) - (u, (x1 + l2*sin.(p2), - y1 - l2*cos.(p2))) -end - -#Define the Problem -function double_pendulum(xdot,x,p,t) - xdot[1]=x[2] - xdot[2]=-((g*(2*m₁+m₂)*sin(x[1])+m₂*(g*sin(x[1]-2*x[3])+2*(L₂*x[4]^2+L₁*x[2]^2*cos(x[1]-x[3]))*sin(x[1]-x[3])))/(2*L₁*(m₁+m₂-m₂*cos(x[1]-x[3])^2))) - xdot[3]=x[4] - xdot[4]=(((m₁+m₂)*(L₁*x[2]^2+g*cos(x[1]))+L₂*m₂*x[4]^2*cos(x[1]-x[3]))*sin(x[1]-x[3]))/(L₂*(m₁+m₂-m₂*cos(x[1]-x[3])^2)) -end - -#Pass to Solvers -double_pendulum_problem = ODEProblem(double_pendulum, initial, tspan) -sol = solve(double_pendulum_problem, Vern7(), abs_tol=1e-10, dt=0.05); -``` - -```julia -#Obtain coordinates in Cartesian Geometry -ts, ps = polar2cart(sol, l1=L₁, l2=L₂, dt=0.01) -plot(ps...) -``` - -##### Poincaré section - -In this case the phase space is 4 dimensional and it cannot be easily visualized. 
-Instead of looking at the full phase space, we can look at Poincaré sections, -which are sections through a higher-dimensional phase space diagram. -This helps to understand the dynamics of interactions and is wonderfully pretty. - -The Poincaré section in this is given by the collection of $(β,l_β)$ when $α=0$ and $\frac{dα}{dt}>0$. - -```julia -#Constants and setup -using OrdinaryDiffEq -initial2 = [0.01, 0.005, 0.01, 0.01] -tspan2 = (0.,500.) - -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -# Construct a ContiunousCallback -condition(u,t,integrator) = u[1] -affect!(integrator) = nothing -cb = ContinuousCallback(condition,affect!,nothing, - save_positions = (true,false)) - -# Construct Problem -poincare = ODEProblem(double_pendulum_hamiltonian, initial2, tspan2) -sol2 = solve(poincare, Vern9(), save_everystep = false, save_start=false, save_end=false, callback=cb, abstol=1e-16, reltol=1e-16,) - -function poincare_map(prob, u₀, p; callback=cb) - _prob = ODEProblem(prob.f, u₀, prob.tspan) - sol = solve(_prob, Vern9(), save_everystep = false, save_start=false, save_end=false, callback=cb, abstol=1e-16, reltol=1e-16) - scatter!(p, sol, vars=(3,4), markersize = 3, msw=0) -end -``` - -```julia -lβrange = -0.02:0.0025:0.02 -p = scatter(sol2, vars=(3,4), leg=false, markersize = 3, msw=0) -for lβ in lβrange - poincare_map(poincare, [0.01, 0.01, 0.01, lβ], p) -end -plot(p, xlabel="\\beta", ylabel="l_\\beta", ylims=(0, 0.03)) -``` - -#### Hénon-Heiles System - -The Hénon-Heiles potential occurs when non-linear motion of a star around a galactic center with the motion restricted to a plane. 
- -$$ -\begin{align} -\frac{d^2x}{dt^2}&=-\frac{\partial V}{\partial x}\\ -\frac{d^2y}{dt^2}&=-\frac{\partial V}{\partial y} -\end{align} -$$ - -where - -$$V(x,y)={\frac {1}{2}}(x^{2}+y^{2})+\lambda \left(x^{2}y-{\frac {y^{3}}{3}}\right).$$ - -We pick $\lambda=1$ in this case, so - -$$V(x,y) = \frac{1}{2}(x^2+y^2+2x^2y-\frac{2}{3}y^3).$$ - -Then the total energy of the system can be expressed by - -$$E = T+V = V(x,y)+\frac{1}{2}(\dot{x}^2+\dot{y}^2).$$ - -The total energy should conserve as this system evolves. - -```julia -using OrdinaryDiffEq, Plots - -#Setup -initial = [0.,0.1,0.5,0] -tspan = (0,100.) - -#Remember, V is the potential of the system and T is the Total Kinetic Energy, thus E will -#the total energy of the system. -V(x,y) = 1//2 * (x^2 + y^2 + 2x^2*y - 2//3 * y^3) -E(x,y,dx,dy) = V(x,y) + 1//2 * (dx^2 + dy^2); - -#Define the function -function Hénon_Heiles(du,u,p,t) - x = u[1] - y = u[2] - dx = u[3] - dy = u[4] - du[1] = dx - du[2] = dy - du[3] = -x - 2x*y - du[4] = y^2 - y -x^2 -end - -#Pass to solvers -prob = ODEProblem(Hénon_Heiles, initial, tspan) -sol = solve(prob, Vern9(), abs_tol=1e-16, rel_tol=1e-16); -``` - -```julia -# Plot the orbit -plot(sol, vars=(1,2), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) -``` - -```julia -#Optional Sanity check - what do you think this returns and why? -@show sol.retcode - -#Plot - -plot(sol, vars=(1,3), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol, vars=(2,4), leg = false) -``` - -```julia -#We map the Total energies during the time intervals of the solution (sol.u here) to a new vector -#pass it to the plotter a bit more conveniently -energy = map(x->E(x...), sol.u) - -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. 
-@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol.t, energy .- energy[1], title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -##### Symplectic Integration - -To prevent energy drift, we can instead use a symplectic integrator. We can directly define and solve the `SecondOrderODEProblem`: - -```julia -function HH_acceleration!(dv,v,u,p,t) - x,y = u - dx,dy = dv - dv[1] = -x - 2x*y - dv[2] = y^2 - y -x^2 -end -initial_positions = [0.0,0.1] -initial_velocities = [0.5,0.0] -prob = SecondOrderODEProblem(HH_acceleration!,initial_velocities,initial_positions,tspan) -sol2 = solve(prob, KahanLi8(), dt=1/10); -``` - -Notice that we get the same results: - -```julia -# Plot the orbit -plot(sol2, vars=(3,4), title = "The orbit of the Hénon-Heiles system", xaxis = "x", yaxis = "y", leg=false) -``` - -```julia -plot(sol2, vars=(3,1), title = "Phase space for the Hénon-Heiles system", xaxis = "Position", yaxis = "Velocity") -plot!(sol2, vars=(4,2), leg = false) -``` - -but now the energy change is essentially zero: - -```julia -energy = map(x->E(x[3], x[4], x[1], x[2]), sol2.u) -#We use @show here to easily spot erratic behaviour in our system by seeing if the loss in energy was too great. -@show ΔE = energy[1]-energy[end] - -#Plot -plot(sol2.t, energy .- energy[1], title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -And let's try to use a Runge-Kutta-Nyström solver to solve this. Note that Runge-Kutta-Nyström isn't symplectic. - -```julia -sol3 = solve(prob, DPRKN6()); -energy = map(x->E(x[3], x[4], x[1], x[2]), sol3.u) -@show ΔE = energy[1]-energy[end] -gr() -plot(sol3.t, energy .- energy[1], title = "Change in Energy over Time", xaxis = "Time in iterations", yaxis = "Change in Energy") -``` - -Note that we are using the `DPRKN6` sovler at `reltol=1e-3` (the default), yet it has a smaller energy variation than `Vern9` at `abs_tol=1e-16, rel_tol=1e-16`. 
Therefore, using specialized solvers to solve its particular problem is very efficient. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/02-conditional_dosing.jmd b/tutorials/models/02-conditional_dosing.jmd deleted file mode 100644 index 91879003..00000000 --- a/tutorials/models/02-conditional_dosing.jmd +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: Conditional Dosing Pharmacometric Example -author: Chris Rackauckas ---- - -In this example we will show how to model a conditional dosing using the `DiscreteCallbacks`. The problem is as follows. The patient has a drug `A(t)` in their system. The concentration of the drug is given as `C(t)=A(t)/V` for some volume constant `V`. At `t=4`, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below `4`, then they will receive a new dose. - -For our model, we will use the simple decay equation. We will write this in the in-place form to make it easy to extend to more complicated examples: - -```julia -using DifferentialEquations -function f(du,u,p,t) - du[1] = -u[1] -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) -``` - -Let's see what the solution looks like without any events. - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -We see that at time `t=4`, the patient should receive a dose. Let's code up that event. We need to check at `t=4` if the concentration `u[1]/4` is `<4`, and if so, add `10` to `u[1]`. We do this with the following: - -```julia -condition(u,t,integrator) = t==4 && u[1]/V<4 -affect!(integrator) = integrator.u[1] += 10 -cb = DiscreteCallback(condition,affect!) 
-``` - -Now we will give this callback to the solver, and tell it to stop at `t=4` so that way the condition can be checked: - -```julia -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) -``` - -Let's show that it actually added 10 instead of setting the value to 10. We could have set the value using `affect!(integrator) = integrator.u[1] = 10` - -```julia -println(sol(4.00000)) -println(sol(4.000000000001)) -``` - -Now let's model a patient whose decay rate for the drug is lower: - -```julia -function f(du,u,p,t) - du[1] = -u[1]/6 -end -u0 = [10.0] -const V = 1 -prob = ODEProblem(f,u0,(0.0,10.0)) -``` - -```julia -sol = solve(prob,Tsit5()) -using Plots; gr() -plot(sol) -``` - -Under the same criteria, with the same event, this patient will not receive a second dose: - -```julia -sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) -using Plots; gr() -plot(sol) -``` - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/03-diffeqbio_I_introduction.jmd b/tutorials/models/03-diffeqbio_I_introduction.jmd deleted file mode 100644 index f1d3a111..00000000 --- a/tutorials/models/03-diffeqbio_I_introduction.jmd +++ /dev/null @@ -1,267 +0,0 @@ ---- -title: "DiffEqBiological Tutorial I: Introduction" -author: Samuel Isaacson ---- - -DiffEqBiological.jl is a domain specific language (DSL) for writing chemical -reaction networks in Julia. The generated chemical reaction network model can -then be translated into a variety of mathematical models which can be solved -using components of the broader -[DifferentialEquations.jl](http://sciml.ai/) ecosystem. - -In this tutorial we'll provide an introduction to using DiffEqBiological to -specify chemical reaction networks, and then to solve ODE, jump, tau-leaping and -SDE models generated from them. 
Let's start by using the DiffEqBiological -`reaction_network` macro to specify a simply chemical reaction network; the -well-known Repressilator. - -We first import the basic packages we'll need, and use Plots.jl for making -figures: - -```julia -# If not already installed, first hit "]" within a Julia REPL. Then type: -# add DifferentialEquations DiffEqBiological PyPlot Plots Latexify - -using DifferentialEquations, DiffEqBiological, Plots, Latexify -pyplot(fmt=:svg); -``` - -We now construct the reaction network. The basic types of arrows and predefined -rate laws one can use are discussed in detail within the DiffEqBiological -[Chemical Reaction Models -documentation](https://docs.sciml.ai/dev/models/biological). Here -we use a mix of first order, zero order and repressive Hill function rate laws. -Note, $\varnothing$ corresponds to the empty state, and is used for zeroth order -production and first order degradation reactions: - -```julia -repressilator = @reaction_network begin - hillr(P₃,α,K,n), ∅ --> m₁ - hillr(P₁,α,K,n), ∅ --> m₂ - hillr(P₂,α,K,n), ∅ --> m₃ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - (δ,γ), m₃ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - β, m₃ --> m₃ + P₃ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - μ, P₃ --> ∅ -end α K n δ γ β μ; -``` - -We can use Latexify to look at the corresponding reactions and understand the -generated rate laws for each reaction - -```julia; results="hidden"; -latexify(repressilator; env=:chemical) -``` -```julia; echo=false; skip="notebook"; -mathjax = WEAVE_ARGS[:doctype] == "pdf" ? 
false : true -x = latexify(repressilator; env=:chemical, starred=true, mathjax=mathjax); -display("text/latex", "$x"); -``` - -We can also use Latexify to look at the corresponding ODE model for the chemical -system - -```julia; results="hidden"; -latexify(repressilator, cdot=false) -``` -```julia; echo=false; skip="notebook"; -x = latexify(repressilator, cdot=false, starred=true); -display("text/latex", "$x"); -``` - -To solve the ODEs we need to specify the values of the parameters in the model, -the initial condition, and the time interval to solve the model on. To do this -it helps to know the orderings of the parameters and the species. Parameters are -ordered in the same order they appear after the `end` statement in the -`@reaction_network` macro. Species are ordered in the order they first appear -within the `@reaction_network` macro. We can see these orderings using the -`speciesmap` and `paramsmap` functions: - -```julia -speciesmap(repressilator) -``` - -```julia -paramsmap(repressilator) -``` - -## Solving the ODEs: -Knowing these orderings, we can create parameter and initial condition vectors, -and setup the `ODEProblem` we want to solve: - -```julia -# parameters [α,K,n,δ,γ,β,μ] -p = (.5, 40, 2, log(2)/120, 5e-3, 20*log(2)/120, log(2)/60) - -# initial condition [m₁,m₂,m₃,P₁,P₂,P₃] -u₀ = [0.,0.,0.,20.,0.,0.] - -# time interval to solve on -tspan = (0., 10000.) - -# create the ODEProblem we want to solve -oprob = ODEProblem(repressilator, u₀, tspan, p) -``` - -At this point we are all set to solve the ODEs. We can now use any ODE solver -from within the DiffEq package. We'll just use the default DifferentialEquations -solver for now, and then plot the solutions: - -```julia -sol = solve(oprob, saveat=10.) -plot(sol, fmt=:svg) -``` - -We see the well-known oscillatory behavior of the repressilator! For more on -choices of ODE solvers, see the JuliaDiffEq -[documentation](https://docs.sciml.ai/dev/solvers/ode_solve). 
- ---- - -## Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kinetics -Let's now look at a stochastic chemical kinetics model of the repressilator, -modeling it with jump processes. Here we will construct a DiffEqJump -`JumpProblem` that uses Gillespie's `Direct` method, and then solve it to -generate one realization of the jump process: - -```julia -# first we redefine the initial condition to be integer valued -u₀ = [0,0,0,20,0,0] - -# next we create a discrete problem to encode that our species are integer valued: -dprob = DiscreteProblem(repressilator, u₀, tspan, p) - -# now we create a JumpProblem, and specify Gillespie's Direct Method as the solver: -jprob = JumpProblem(dprob, Direct(), repressilator, save_positions=(false,false)) - -# now let's solve and plot the jump process: -sol = solve(jprob, SSAStepper(), saveat=10.) -plot(sol, fmt=:svg) -``` - -Here we see that oscillations remain, but become much noiser. Note, in -constructing the `JumpProblem` we could have used any of the SSAs that are part -of DiffEqJump instead of the `Direct` method, see the list of SSAs (i.e. -constant rate jump aggregators) in the -[documentation](https://docs.sciml.ai/latest/types/jump_types/#Constant-Rate-Jump-Aggregators-1). - ---- -## $\tau$-leaping Methods: -While SSAs generate exact realizations for stochastic chemical kinetics jump -process models, [$\tau$-leaping](https://en.wikipedia.org/wiki/Tau-leaping) -methods offer a performant alternative by discretizing in time the underlying -time-change representation of the stochastic process. The DiffEqJump package has -limited support for $\tau$-leaping methods in the form of the basic Euler's -method type approximation proposed by Gillespie. 
We can simulate a $\tau$-leap -approximation to the repressilator by using the `RegularJump` representation of -the network to construct a `JumpProblem`: - -```julia -rjs = regularjumps(repressilator) -lprob = JumpProblem(dprob, Direct(), rjs) -lsol = solve(lprob, SimpleTauLeaping(), dt=.1) -plot(lsol, plotdensity=1000, fmt=:svg) -``` - ---- -## Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models: -At an intermediary physical scale between macroscopic ODE models and microscopic -stochastic chemical kinetic models lies the CLE, a SDE version of the model. The -SDEs add to each ODE above a noise term. As the repressilator has species that -get very close to zero in size, it is not a good candidate to model with the CLE -(where solutions can then go negative and become unphysical). Let's create a -simpler reaction network for a birth-death process that will stay non-negative: - -```julia -bdp = @reaction_network begin - c₁, X --> 2X - c₂, X --> 0 - c₃, 0 --> X -end c₁ c₂ c₃ -p = (1.0,2.0,50.) -u₀ = [5.] -tspan = (0.,4.); -``` - -The corresponding Chemical Langevin Equation SDE is then - -```julia; results="hidden"; -latexify(bdp, noise=true, cdot=false) -``` -```julia; echo=false; skip="notebook"; -x = latexify(bdp, noise=true, cdot=false, starred=true); -display("text/latex", "$x"); -``` - -where each $W_i(t)$ denotes an independent Brownian Motion. We can solve the CLE -SDE model by creating an `SDEProblem` and solving it similar to what we did for -ODEs above: - -```julia -# SDEProblem for CLE -sprob = SDEProblem(bdp, u₀, tspan, p) - -# solve and plot, tstops is used to specify enough points -# that the plot looks well-resolved -sol = solve(sprob, tstops=range(0., step=4e-3, length=1001)) -plot(sol, fmt=:svg) -``` - -We again have complete freedom to select any of the -StochasticDifferentialEquations.jl SDE solvers, see the -[documentation](https://docs.sciml.ai/dev/solvers/sde_solve). 
- ---- -## What information can be queried from the reaction_network: -The generated `reaction_network` contains a lot of basic information. For example -- `f=oderhsfun(repressilator)` is a function `f(du,u,p,t)` that given the current - state vector `u` and time `t` fills `du` with the time derivatives of `u` - (i.e. the right hand side of the ODEs). -- `jac=jacfun(repressilator)` is a function `jac(J,u,p,t)` that evaluates and - returns the Jacobian of the ODEs in `J`. A corresponding Jacobian matrix of - expressions can be accessed using the `jacobianexprs` function: -```julia; results="hidden"; -latexify(jacobianexprs(repressilator), cdot=false) -``` -```julia; echo=false; skip="notebook"; -x = latexify(jacobianexprs(repressilator), cdot=false, starred=true); -display("text/latex", "$x"); -``` -- `pjac = paramjacfun(repressilator)` is a function `pjac(pJ,u,p,t)` that - evaluates and returns the Jacobian, `pJ`, of the ODEs *with respect to the - parameters*. This allows `reaction_network`s to be used in the - DifferentialEquations.jl local sensitivity analysis package - [DiffEqSensitivity](https://docs.sciml.ai/dev/analysis/sensitivity). - - -By default, generated `ODEProblems` will be passed the corresponding Jacobian -function, which will then be used within implicit ODE/SDE methods. - -The [DiffEqBiological API -documentation](https://docs.sciml.ai/dev/apis/diffeqbio) provides -a thorough description of the many query functions that are provided to access -network properties and generated functions. In DiffEqBiological Tutorial II -we'll explore the API. - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). 
-If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [SciMLTutorials Github -site](https://github.com/JuliaDiffEq/SciMLTutorials.jl). Or, submit a pull -request to SciMLTutorials updating the tutorial! - ---- -```julia; echo=false; skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/04-diffeqbio_II_networkproperties.jmd b/tutorials/models/04-diffeqbio_II_networkproperties.jmd deleted file mode 100644 index 2b0714e4..00000000 --- a/tutorials/models/04-diffeqbio_II_networkproperties.jmd +++ /dev/null @@ -1,488 +0,0 @@ ---- -title: "DiffEqBiological Tutorial II: Network Properties API" -author: Samuel Isaacson ---- - -The [DiffEqBiological -API](https://docs.sciml.ai/dev/apis/diffeqbio) provides a -collection of functions for easily accessing network properties, and for -incrementally building and extending a network. In this tutorial we'll go -through the API, and then illustrate how to programmatically construct a -network. 
- -We'll illustrate the API using a toggle-switch like network that contains a -variety of different reaction types: - -```julia -using DifferentialEquations, DiffEqBiological, Latexify, Plots -fmt = :svg -pyplot(fmt=fmt) -rn = @reaction_network begin - hillr(D₂,α,K,n), ∅ --> m₁ - hillr(D₁,α,K,n), ∅ --> m₂ - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ - (k₊,k₋), 2P₁ ↔ D₁ - (k₊,k₋), 2P₂ ↔ D₂ - (k₊,k₋), P₁+P₂ ↔ T -end α K n δ γ β μ k₊ k₋; -``` - -This corresponds to the chemical reaction network given by - -```julia; results="hidden"; -latexify(rn; env=:chemical) -``` -```julia; echo=false; skip="notebook"; -x = latexify(rn; env=:chemical, starred=true, mathjax=true); -display("text/latex", "$x"); -``` - ---- -## Network Properties -[Basic -properties](https://docs.sciml.ai/latest/apis/diffeqbio/#Basic-properties-1) -of the generated network include the `speciesmap` and `paramsmap` functions we -examined in the last tutorial, along with the corresponding `species` and -`params` functions: - -```julia -species(rn) -``` -```julia -params(rn) -``` - -The numbers of species, parameters and reactions can be accessed using -`numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`. - -A number of functions are available to access [properties of -reactions](https://docs.sciml.ai/latest/apis/diffeqbio/#Reaction-Properties-1) -within the generated network, including `substrates`, `products`, `dependents`, -`ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`, -`productsymstoich`, and `netstoich`. Each of these functions takes two -arguments, the reaction network `rn` and the index of the reaction to query -information about. 
For example, to find the substrate symbols and their -corresponding stoichiometries for the 11th reaction, `2P₁ --> D₁`, we would use - -```julia -substratesymstoich(rn, 11) -``` - -Broadcasting works on all these functions, allowing the construction of a vector -holding the queried information across all reactions, i.e. - -```julia -substratesymstoich.(rn, 1:numreactions(rn)) -``` - -To see the net stoichiometries for all reactions we would use - -```julia -netstoich.(rn, 1:numreactions(rn)) -``` - -Here the first integer in each pair corresponds to the index of the species -(with symbol `species(rn)[index]`). The second integer corresponds to the net -stoichiometric coefficient of the species within the reaction. `substratestoich` -and `productstoich` are defined similarly. - -Several functions are also provided that calculate different types of -[dependency -graphs](https://docs.sciml.ai/latest/apis/diffeqbio/#Dependency-Graphs-1). -These include `rxtospecies_depgraph`, which provides a mapping from reaction -index to the indices of species whose population changes when the reaction -occurs: - -```julia -rxtospecies_depgraph(rn) -``` - -Here the last row indicates that the species with indices `[3,4,7]` will change -values when the reaction `T --> P₁ + P₂` occurs. To confirm these are the -correct species we can look at - -```julia -species(rn)[[3,4,7]] -``` - -The `speciestorx_depgraph` similarly provides a mapping from species to reactions -for which their *rate laws* depend on that species. These correspond to all reactions -for which the given species is in the `dependent` set of the reaction. 
We can verify this -for the first species, `m₁`: - -```julia -speciestorx_depgraph(rn)[1] -``` -```julia -findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn))) -``` - -Finally, `rxtorx_depgraph` provides a mapping that shows when a given reaction -occurs, which other reactions have rate laws that involve species whose value -would have changed: - -```julia -rxtorx_depgraph(rn) -``` - -#### Note on Using Network Property API Functions -Many basic network query and reaction property functions are simply accessors, -returning information that is already stored within the generated -`reaction_network`. For these functions, modifying the returned data structures -may lead to inconsistent internal state within the network. As such, they should -be used for accessing, but not modifying, network properties. The [API -documentation](https://docs.sciml.ai/dev/apis/diffeqbio) -indicates which functions return newly allocated data structures and which -return data stored within the `reaction_network`. - ---- -## Incremental Construction of Networks -The `@reaction_network` macro is monolithic, in that it not only constructs and -stores basic network properties such as the reaction stoichiometries, but also -generates **everything** needed to immediately solve ODE, SDE and jump models -using the network. This includes Jacobian functions, noise functions, and jump -functions for each reaction. While this allows for a compact interface to the -DifferentialEquations.jl solvers, it can also be computationally expensive for -large networks, where a user may only wish to solve one type of problem and/or -have fine-grained control over what is generated. In addition, some types of -reaction network structures are more amenable to being constructed -programmatically, as opposed to writing out all reactions by hand within one -macro. 
For these reasons DiffEqBiological provides two additional macros that -only *initially* setup basic reaction network properties, and which can be -extended through a programmatic interface: `@min_reaction_network` and -`@empty_reaction_network`. We now give an introduction to constructing these -more minimal network representations, and how they can be programmatically -extended. See also the relevant [API -section](https://docs.sciml.ai/latest/apis/diffeqbio/#Reaction-Network-Generation-Macros-1). - -The `@min_reaction_network` macro works identically to the `@reaction_network` -macro, but the generated network will only be complete with respect to its -representation of chemical network properties (i.e. species, parameters and -reactions). No ODE, SDE or jump models are generated during the macro call. It -can subsequently be extended with the addition of new species, parameters or -reactions. The `@empty_reaction_network` allocates an empty network structure -that can also be extended using the programmatic interface. For example, consider -a partial version of the toggle-switch like network we defined above: - -```julia -rnmin = @min_reaction_network begin - (δ,γ), m₁ ↔ ∅ - (δ,γ), m₂ ↔ ∅ - β, m₁ --> m₁ + P₁ - β, m₂ --> m₂ + P₂ - μ, P₁ --> ∅ - μ, P₂ --> ∅ -end δ γ β μ; -``` - -Here we have left out the first two, and last three, reactions from the original -`reaction_network`. To expand the network until it is functionally equivalent to -the original model we add back in the missing species, parameters, and *finally* -the missing reactions. Note, it is required that species and parameters be -defined before any reactions using them are added. The necessary network -extension functions are given by `addspecies!`, `addparam!` and `addreaction!`, -and described in the -[API](https://docs.sciml.ai/latest/apis/diffeqbio/#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). 
To complete `rnmin` we first add the relevant -species: - -```julia -addspecies!(rnmin, :D₁) -addspecies!(rnmin, :D₂) -addspecies!(rnmin, :T) -``` - -Next we add the needed parameters - -```julia -addparam!(rnmin, :α) -addparam!(rnmin, :K) -addparam!(rnmin, :n) -addparam!(rnmin, :k₊) -addparam!(rnmin, :k₋) -``` - -Note, both `addspecies!` and `addparam!` also accept strings encoding the -variable names (which are then converted to `Symbol`s internally). - -We are now ready to add the missing reactions. The API provides two forms of the -`addreaction!` function, one takes expressions analogous to what one would write -in the macro: - -```julia -addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂)) -addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂)) -addreaction!(rnmin, :k₊, :(2P₁ --> D₁)) -addreaction!(rnmin, :k₋, :(D₁ --> 2P₁)) -``` - -The rate can be an expression or symbol as above, but can also just be a -numeric value. The second form of `addreaction!` takes tuples of -`Pair{Symbol,Int}` that encode the stoichiometric coefficients of substrates and -reactants: - -```julia -# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich) -addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,)) -addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,)) -addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1)) -``` - -Let's check that `rn` and `rnmin` have the same set of species: - -```julia -setdiff(species(rn), species(rnmin)) -``` - -the same set of params: - -```julia -setdiff(params(rn), params(rnmin)) -``` - -and the final reaction has the same substrates, reactions, and rate expression: - -```julia -rxidx = numreactions(rn) -setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx)) -``` -```julia -setdiff(products(rn, rxidx), products(rnmin, rxidx)) -``` -```julia -rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx) -``` - ---- -## Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps -Once a network generated from `@min_reaction_network` or 
-`@empty_reaction_network` has had all the associated species, parameters and -reactions filled in, corresponding ODE, SDE or jump models can be constructed. -The relevant API functions are `addodes!`, `addsdes!` and `addjumps!`. One -benefit to contructing models with these functions is that they offer more -fine-grained control over what actually gets constructed. For example, -`addodes!` has the optional keyword argument, `build_jac`, which if set to -`false` will disable construction of symbolic Jacobians and functions for -evaluating Jacobians. For large networks this can give a significant speed-up in -the time required for constructing an ODE model. Each function and its -associated keyword arguments are described in the API section, [Functions to add -ODEs, SDEs or Jumps to a -Network](https://docs.sciml.ai/latest/apis/diffeqbio/#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1). - -Let's extend `rnmin` to include the needed functions for use in ODE -solvers: - -```julia -addodes!(rnmin) -``` - -The [Generated Functions for -Models](https://docs.sciml.ai/latest/apis/diffeqbio/#Generated-Functions-for-Models-1) -section of the API shows what functions have been generated. For ODEs these -include `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)` -which evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. For -each generated function, the corresponding expressions from which it was -generated can be retrieved using accessors from the [Generated -Expressions](https://docs.sciml.ai/latest/apis/diffeqbio/#Generated-Expressions-1) -section of the API. The equations within `du` can be retrieved using the -`odeexprs(rnmin)` function. 
For example: - -```julia -odeexprs(rnmin) -``` - -Using Latexify we can see the ODEs themselves to compare with these expressions: - -```julia; results="hidden"; -latexify(rnmin) -``` -```julia; echo=false; skip="notebook"; -x = latexify(rnmin, starred=true); -display("text/latex", "$x"); -``` - -For ODEs two other functions are generated by `addodes!`. `jacfun(rnmin)` will -return the generated Jacobian evaluation function, `fjac(dJ,u,p,t)`, which given -the current solution `u` evaluates the Jacobian within `dJ`. -`jacobianexprs(rnmin)` gives the corresponding matrix of expressions, which can -be used with Latexify to see the Jacobian: - -```julia; results="hidden"; -latexify(jacobianexprs(rnmin)) -``` -```julia; echo=false; skip="notebook"; -x = latexify(jacobianexprs(rnmin), starred=true); -display("text/latex", "$x"); -``` - -`addodes!` also generates a function that evaluates the Jacobian of the ODE -derivative functions with respect to the parameters. `paramjacfun(rnmin)` then -returns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which -given the current solution `u` evaluates the Jacobian matrix with respect to -parameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an -[`ODEFunction`](https://docs.sciml.ai/dev/features/performance_overloads) -representation of the ODEs is available from `odefun(rnmin)`. - -`addsdes!` and `addjumps!` work similarly to complete the network for use in -StochasticDiffEq and DiffEqJump solvers. - -#### Note on Using Generated Function and Expression API Functions -The generated functions and expressions accessible through the API require first -calling the appropriate `addodes!`, `addsdes` or `addjumps` function. These are -responsible for actually constructing the underlying functions and expressions. -The API accessors simply return already constructed functions and expressions -that are stored within the `reaction_network` structure. 
- ---- -## Example of Generating a Network Programmatically -For a user directly typing in a reaction network, it is generally easier to use -the `@min_reaction_network` or `@reaction_network` macros to fully specify -reactions. However, for large, structured networks it can be much easier to -generate the network programmatically. For very large networks, with tens of -thousands of reactions, the form of `addreaction!` that uses stoichiometric -coefficients should be preferred as it offers substantially better performance. -To put together everything we've seen, let's generate the network corresponding -to a 1D continuous time random walk, approximating the diffusion of molecules -within an interval. - -The basic "reaction" network we wish to study is - -$$ -u_1 \leftrightarrows u_2 \leftrightarrows u_3 \cdots \leftrightarrows u_{N} -$$ - -for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll -assume the rate molecules hop from their current site to any particular neighbor -is just $h^{-2}$. We can interpret this hopping process as a collection of -$2N-2$ "reactions", with the form $u_i \to u_j$ for $j=i+1$ or $j=i-1$. We construct -the corresponding reaction network as follows. 
First we set values for the basic -parameters: -```julia -N = 64 -h = 1 / N -``` - -then we create an empty network, and add each species - -```julia -rn = @empty_reaction_network - -for i = 1:N - addspecies!(rn, Symbol(:u, i)) -end -``` - -We next add one parameter `β`, which we will set equal to the hopping rate -of molecules, $h^{-2}$: - -```julia -addparam!(rn, :β) -``` - -Finally, we add in the $2N-2$ possible hopping reactions: -```julia -for i = 1:N - (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,)) - (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,)) -end -``` - -Let's first construct an ODE model for the network - -```julia -addodes!(rn) -``` - -We now need to specify the initial condition, parameter vector and time interval -to solve on. We start with 10000 molecules placed at the center of the domain, -and setup an `ODEProblem` to solve: - -```julia -u₀ = zeros(N) -u₀[div(N,2)] = 10000 -p = [1/(h*h)] -tspan = (0.,.01) -oprob = ODEProblem(rn, u₀, tspan, p) -``` - -We are now ready to solve the problem and plot the solution. Since we have -essentially generated a method of lines discretization of the diffusion equation -with a discontinuous initial condition, we'll use an A-L stable implicit ODE -solver, `Rodas5`, and plot the solution at a few times: - -```julia -sol = solve(oprob, Rodas5()) -times = [0., .0001, .001, .01] -plt = plot() -for time in times - plot!(plt, 1:N, sol(time), fmt=fmt, xlabel="i", ylabel="uᵢ", label=string("t = ", time), lw=3) -end -plot(plt, ylims=(0.,10000.)) -``` - -Here we see the characteristic diffusion of molecules from the center of the -domain, resulting in a shortening and widening of the solution as $t$ increases. - -Let's now look at a stochastic chemical kinetics jump process version of the -model, where β gives the probability per time each molecule can hop from its -current lattice site to an individual neighboring site. 
We first add in the -jumps, disabling `regular_jumps` since they are not needed, and using the -`minimal_jumps` flag to construct a minimal representation of the needed jumps. -We then construct a `JumpProblem`, and use the Composition-Rejection Direct -method, `DirectCR`, to simulate the process of the molecules hopping about on -the lattice: - -```julia -addjumps!(rn, build_regular_jumps=false, minimal_jumps=true) - -# make the initial condition integer valued -u₀ = zeros(Int, N) -u₀[div(N,2)] = 10000 - -# setup and solve the problem -dprob = DiscreteProblem(rn, u₀, tspan, p) -jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false)) -jsol = solve(jprob, SSAStepper(), saveat=times) -``` - -We can now plot bar graphs showing the locations of the molecules at the same -set of times we examined the ODE solution. For comparison, we also plot the -corresponding ODE solutions (red lines) that we found: -```julia -times = [0., .0001, .001, .01] -plts = [] -for i = 1:4 - b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i])) - plot!(b,sol(times[i])) - push!(plts,b) -end -plot(plts...) -``` - -Similar to the ODE solutions, we see that the molecules spread out and become -more and more well-mixed throughout the domain as $t$ increases. The simulation -results are noisy due to the finite numbers of molecules present in the -stochsatic simulation, but since the number of molecules is large they agree -well with the ODE solution at each time. - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). -If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). 
If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [SciMLTutorials Github -site](https://github.com/JuliaDiffEq/SciMLTutorials.jl). Or, submit a pull -request to SciMLTutorials updating the tutorial! - ---- -```julia; echo=false; skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/04b-diffeqbio_III_steadystates.jmd b/tutorials/models/04b-diffeqbio_III_steadystates.jmd deleted file mode 100644 index 0aa5cc8d..00000000 --- a/tutorials/models/04b-diffeqbio_III_steadystates.jmd +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: "DiffEqBiological Tutorial III: Steady-States and Bifurcations" -author: Torkel Loman and Samuel Isaacson ---- - -Several types of steady state analysis can be performed for networks defined -with DiffEqBiological by utilizing homotopy continuation. This allows for -finding the steady states and bifurcations within a large class of systems. In -this tutorial we'll go through several examples of using this functionality. - -We start by loading the necessary packages: -```julia -using DiffEqBiological, Plots -gr(); default(fmt = :png); -``` - -### Steady states and stability of a biochemical reaction network. -Bistable switches are well known biological motifs, characterised by the -presence of two different stable steady states. - -```julia -bistable_switch = @reaction_network begin - d, (X,Y) → ∅ - hillR(Y,v1,K1,n1), ∅ → X - hillR(X,v2,K2,n2), ∅ → Y -end d v1 K1 n1 v2 K2 n2 -d = 0.01; -v1 = 1.5; K1 = 30; n1 = 3; -v2 = 1.; K2 = 30; n2 = 3; -bistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2]; -``` - -The steady states can be found using the `steady_states` function (which takes a reaction network and a set of parameter values as input). The stability of these steady states can be found using the `stability` function. 
- -```julia -ss = steady_states(bistable_switch, bistable_switch_p) -``` - -```julia -stability(ss,bistable_switch, bistable_switch_p) -``` - -Since the equilibration methodology is based on homotopy continuation, it is not -able to handle systems with non-integer exponents, or non polynomial reaction -rates. Neither of the following two systems will work. - -This system contains a non-integer exponent: -```julia -rn1 = @reaction_network begin - p, ∅ → X - hill(X,v,K,n), X → ∅ -end p v K n -p1 = [1.,2.5,1.5,1.5] -steady_states(rn1,p1) -``` - -This system contains a logarithmic reaction rate: -```julia -rn2 = @reaction_network begin - p, ∅ → X - log(X), X → ∅ -end p -p2 = [1.] -steady_states(rn2,p2) -``` - -### Bifurcation diagrams for biochemical reaction networks -Bifurcation diagrams illustrate how the steady states of a system depend on one -or more parameters. They can be computed with the `bifurcations` function. It -takes the same arguments as `steady_states`, with the addition of the parameter -one wants to vary, and an interval over which to vary it: - -```julia -bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.)) -plot(bif,ylabel="[X]",label="") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -The values for the second variable in the system can also be displayed, by -giving that as an additional input to `plot` (it is the second argument, directly -after the bifurcation diagram object): - -```julia -plot(bif,2,ylabel="[Y]") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -The `plot` function also accepts all other arguments which the Plots.jl `plot` function accepts. - -```julia -bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.)) -plot(bif,linewidth=1.,title="A bifurcation diagram",ylabel="Steady State concentration") -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -Certain parameters, like `n1`, cannot be sensibly varied over a continuous -interval. 
Instead, a discrete bifurcation diagram can be calculated with the -`bifurcation_grid` function. Instead of an interval, the last argument is a -range of numbers: - -```julia -bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.) -plot(bif) -scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -### Bifurcation diagrams over two dimensions -In addition to the bifurcation diagrams illustrated above, where only a single -variable is varied, it is also possible to investigate the steady state -properties of s system as two different parameters are varied. Due to the nature -of the underlying bifurcation algorithm it is not possible to continuously vary -both parameters. Instead, a set of discrete values are selected for the first -parameter, and a continuous interval for the second. Next, for each discrete -value of the first parameter, a normal bifurcation diagram is created over the -interval given for the second parameter. - -```julia -bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.)) -plot(bif) -plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -In the single variable case we could use a `bifurcation_grid` to investigate the -behavior of a parameter which could only attain discrete values. In the same -way, if we are interested in two parameters, both of which require integer -values, we can use `bifrucation_grid_2d`. In our case, this is required if we -want to vary both the parameters `n1` and `n2`: - -```julia -bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.) -plot(bif) -scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) -``` - -### The Brusselator -The Brusselator is a well know reaction network, which may or may not oscillate, -depending on parameter values. 
- -```julia -brusselator = @reaction_network begin - A, ∅ → X - 1, 2X + Y → 3X - B, X → Y - 1, X → ∅ -end A B; -A = 0.5; B = 4.; -brusselator_p = [A, B]; -``` - -The system has only one steady state, for $(X,Y)=(A,B/A)$ This fixed point -becomes unstable when $B > 1+A^2$, leading to oscillations. Bifurcation diagrams -can be used to determine the system's stability, and hence look for where oscillations might appear in the Brusselator: - -```julia -bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5)) -plot(bif,2) -plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) -``` - -Here red and yellow colors label unstable steady-states, while blue and cyan -label stable steady-states. (In addition, yellow and cyan correspond to points -where at least one eigenvalue of the Jacobian is imaginary, while red and blue -correspond to points with real-valued eigenvalues.) - -Given `A=0.5`, the point at which the system should become unstable is `B=1.25`. We can confirm this in the bifurcation diagram. - -We can also investigate the behavior when we vary both parameters of the system: - -```julia -bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0)) -plot(bif) -plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) -``` - ---- -## Getting Help -Have a question related to DiffEqBiological or this tutorial? Feel free to ask -in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). -If you think you've found a bug in DiffEqBiological, or would like to -request/discuss new functionality, feel free to open an issue on -[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check -there is no related issue already open). 
If you've found a bug in this tutorial, -or have a suggestion, feel free to open an issue on the [SciMLTutorials Github -site](https://github.com/JuliaDiffEq/SciMLTutorials.jl). Or, submit a pull -request to SciMLTutorials updating the tutorial! - ---- -```julia; echo=false; skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) -``` diff --git a/tutorials/models/05-kepler_problem.jmd b/tutorials/models/05-kepler_problem.jmd deleted file mode 100644 index d687cf83..00000000 --- a/tutorials/models/05-kepler_problem.jmd +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: Kepler Problem -author: Yingbo Ma, Chris Rackauckas ---- - -The Hamiltonian $\mathcal {H}$ and the angular momentum $L$ for the Kepler problem are - -$$\mathcal {H} = \frac{1}{2}(\dot{q}^2_1+\dot{q}^2_2)-\frac{1}{\sqrt{q^2_1+q^2_2}},\quad -L = q_1\dot{q_2} - \dot{q_1}q_2$$ - -Also, we know that - -$${\displaystyle {\frac {\mathrm {d} {\boldsymbol {p}}}{\mathrm {d} t}}=-{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {q}}}}\quad ,\quad {\frac {\mathrm {d} {\boldsymbol {q}}}{\mathrm {d} t}}=+{\frac {\partial {\mathcal {H}}}{\partial {\boldsymbol {p}}}}}$$ - -```julia -using OrdinaryDiffEq, LinearAlgebra, ForwardDiff, Plots; gr() -H(q,p) = norm(p)^2/2 - inv(norm(q)) -L(q,p) = q[1]*p[2] - p[1]*q[2] - -pdot(dp,p,q,params,t) = ForwardDiff.gradient!(dp, q->-H(q, p), q) -qdot(dq,p,q,params,t) = ForwardDiff.gradient!(dq, p-> H(q, p), p) - -initial_position = [.4, 0] -initial_velocity = [0., 2.] -initial_cond = (initial_position, initial_velocity) -initial_first_integrals = (H(initial_cond...), L(initial_cond...)) -tspan = (0,20.) -prob = DynamicalODEProblem(pdot, qdot, initial_velocity, initial_position, tspan) -sol = solve(prob, KahanLi6(), dt=1//10); -``` - -Let's plot the orbit and check the energy and angular momentum variation. We know that energy and angular momentum should be constant, and they are also called first integrals. 
- -```julia -plot_orbit(sol) = plot(sol,vars=(3,4), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[2,:], u[1,:]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[2,:], u[1,:]), sol.u), lab="Angular momentum variation") -end -analysis_plot(sol, H, L) = plot(plot_orbit(sol), plot_first_integrals(sol, H, L)) -``` - -```julia -analysis_plot(sol, H, L) -``` - -Let's try to use a Runge-Kutta-Nyström solver to solve this problem and check the first integrals' variation. - -```julia -sol2 = solve(prob, DPRKN6()) # dt is not necessary, because unlike symplectic - # integrators DPRKN6 is adaptive -@show sol2.u |> length -analysis_plot(sol2, H, L) -``` - -Let's then try to solve the same problem by the `ERKN4` solver, which is specialized for sinusoid-like periodic function - -```julia -sol3 = solve(prob, ERKN4()) # dt is not necessary, because unlike symplectic - # integrators ERKN4 is adaptive -@show sol3.u |> length -analysis_plot(sol3, H, L) -``` - -We can see that `ERKN4` does a bad job for this problem, because this problem is not sinusoid-like. - -One advantage of using `DynamicalODEProblem` is that it can implicitly convert the second order ODE problem to a *normal* system of first order ODEs, which is solvable for other ODE solvers. Let's use the `Tsit5` solver for the next example. - -```julia -sol4 = solve(prob, Tsit5()) -@show sol4.u |> length -analysis_plot(sol4, H, L) -``` - -#### Note - -There is drifting for all the solutions, and high order methods are drifting less because they are more accurate. - -### Conclusion - ---- - -Symplectic integrator does not conserve the energy completely at all time, but the energy can come back. In order to make sure that the energy fluctuation comes back eventually, symplectic integrator has to have a fixed time step. 
Despite the energy variation, symplectic integrator conserves the angular momentum perfectly. - -Both Runge-Kutta-Nyström and Runge-Kutta integrator do not conserve energy nor the angular momentum, and the first integrals do not tend to come back. An advantage Runge-Kutta-Nyström integrator over symplectic integrator is that RKN integrator can have adaptivity. An advantage Runge-Kutta-Nyström integrator over Runge-Kutta integrator is that RKN integrator has less function evaluation per step. The `ERKN4` solver works best for sinusoid-like solutions. - -## Manifold Projection - -In this example, we know that energy and angular momentum should be conserved. We can achieve this through mainfold projection. As the name implies, it is a procedure to project the ODE solution to a manifold. Let's start with a base case, where mainfold projection isn't being used. - -```julia -using DiffEqCallbacks - -plot_orbit2(sol) = plot(sol,vars=(1,2), lab="Orbit", title="Kepler Problem Solution") - -function plot_first_integrals2(sol, H, L) - plot(initial_first_integrals[1].-map(u->H(u[1:2],u[3:4]), sol.u), lab="Energy variation", title="First Integrals") - plot!(initial_first_integrals[2].-map(u->L(u[1:2],u[3:4]), sol.u), lab="Angular momentum variation") -end - -analysis_plot2(sol, H, L) = plot(plot_orbit2(sol), plot_first_integrals2(sol, H, L)) - -function hamiltonian(du,u,params,t) - q, p = u[1:2], u[3:4] - qdot(@view(du[1:2]), p, q, params, t) - pdot(@view(du[3:4]), p, q, params, t) -end - -prob2 = ODEProblem(hamiltonian, [initial_position; initial_velocity], tspan) -sol_ = solve(prob2, RK4(), dt=1//5, adaptive=false) -analysis_plot2(sol_, H, L) -``` - -There is a significant fluctuation in the first integrals, when there is no mainfold projection. 
- -```julia -function first_integrals_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) -end - -cb = ManifoldProjection(first_integrals_manifold) -sol5 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=cb) -analysis_plot2(sol5, H, L) -``` - -We can see that thanks to the manifold projection, the first integrals' variation is very small, although we are using `RK4` which is not symplectic. But wait, what if we only project to the energy conservation manifold? - -```julia -function energy_manifold(residual,u) - residual[1:2] .= initial_first_integrals[1] - H(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -energy_cb = ManifoldProjection(energy_manifold) -sol6 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=energy_cb) -analysis_plot2(sol6, H, L) -``` - -There is almost no energy variation but angular momentum varies quite bit. How about only project to the angular momentum conservation manifold? - -```julia -function angular_manifold(residual,u) - residual[1:2] .= initial_first_integrals[2] - L(u[1:2], u[3:4]) - residual[3:4] .= 0 -end -angular_cb = ManifoldProjection(angular_manifold) -sol7 = solve(prob2, RK4(), dt=1//5, adaptive=false, callback=angular_cb) -analysis_plot2(sol7, H, L) -``` - -Again, we see what we expect. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/07-outer_solar_system.jmd b/tutorials/models/07-outer_solar_system.jmd deleted file mode 100644 index 021e33b9..00000000 --- a/tutorials/models/07-outer_solar_system.jmd +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: The Outer Solar System -author: Yingbo Ma, Chris Rackauckas ---- - -## Data - -The chosen units are: masses relative to the sun, so that the sun has mass $1$. We have taken $m_0 = 1.00000597682$ to take account of the inner planets. 
Distances are in astronomical units , times in earth days, and the gravitational constant is thus $G = 2.95912208286 \cdot 10^{-4}$. - -| planet | mass | initial position | initial velocity | -| --- | --- | --- | --- | -| Jupiter | $m_1 = 0.000954786104043$ | | -| Saturn | $m_2 = 0.000285583733151$ | | -| Uranus | $m_3 = 0.0000437273164546$ | | -| Neptune | $m_4 = 0.0000517759138449$ | | -| Pluto | $ m_5 = 1/(1.3 \cdot 10^8 )$ | | - -The data is taken from the book "Geometric Numerical Integration" by E. Hairer, C. Lubich and G. Wanner. - -```julia -using Plots, OrdinaryDiffEq, DiffEqPhysics, RecursiveArrayTools -gr() - -G = 2.95912208286e-4 -M = [1.00000597682, 0.000954786104043, 0.000285583733151, 0.0000437273164546, 0.0000517759138449, 1/1.3e8] -planets = ["Sun", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"] - -pos_x = [0.0,-3.5023653,9.0755314,8.3101420,11.4707666,-15.5387357] -pos_y = [0.0,-3.8169847,-3.0458353,-16.2901086,-25.7294829,-25.2225594] -pos_z = [0.0,-1.5507963,-1.6483708,-7.2521278,-10.8169456,-3.1902382] -pos = ArrayPartition(pos_x,pos_y,pos_z) - -vel_x = [0.0,0.00565429,0.00168318,0.00354178,0.00288930,0.00276725] -vel_y = [0.0,-0.00412490,0.00483525,0.00137102,0.00114527,-0.00170702] -vel_z = [0.0,-0.00190589,0.00192462,0.00055029,0.00039677,-0.00136504] -vel = ArrayPartition(vel_x,vel_y,vel_z) - -tspan = (0.,200_000) -``` - -The N-body problem's Hamiltonian is - -$$H(p,q) = \frac{1}{2}\sum_{i=0}^{N}\frac{p_{i}^{T}p_{i}}{m_{i}} - G\sum_{i=1}^{N}\sum_{j=0}^{i-1}\frac{m_{i}m_{j}}{\left\lVert q_{i}-q_{j} \right\rVert}$$ - -Here, we want to solve for the motion of the five outer planets relative to the sun, namely, Jupiter, Saturn, Uranus, Neptune and Pluto. - -```julia -const ∑ = sum -const N = 6 -potential(p, t, x, y, z, M) = -G*∑(i->∑(j->(M[i]*M[j])/sqrt((x[i]-x[j])^2 + (y[i]-y[j])^2 + (z[i]-z[j])^2), 1:i-1), 2:N) -``` - -## Hamiltonian System - -`NBodyProblem` constructs a second order ODE problem under the hood. 
We know that a Hamiltonian system has the form of - -$$\dot{p} = -H_{q}(p,q)\quad \dot{q}=H_{p}(p,q)$$ - -For an N-body system, we can symplify this as: - -$$\dot{p} = -\nabla{V}(q)\quad \dot{q}=M^{-1}p.$$ - -Thus $\dot{q}$ is defined by the masses. We only need to define $\dot{p}$, and this is done internally by taking the gradient of $V$. Therefore, we only need to pass the potential function and the rest is taken care of. - -```julia -nprob = NBodyProblem(potential, M, pos, vel, tspan) -sol = solve(nprob,Yoshida6(), dt=100); -``` - -```julia -orbitplot(sol,body_names=planets) -``` - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/08-spiking_neural_systems.jmd b/tutorials/models/08-spiking_neural_systems.jmd deleted file mode 100644 index ff5b54d6..00000000 --- a/tutorials/models/08-spiking_neural_systems.jmd +++ /dev/null @@ -1,368 +0,0 @@ ---- -title: Spiking Neural Systems -author: Daniel Müller-Komorowska ---- - -This is an introduction to spiking neural systems with Julia's DifferentialEquations package. -We will cover three different models: leaky integrate-and-fire, Izhikevich, and Hodgkin-Huxley. -Finally we will also learn about two mechanisms that simulate synaptic inputs like -real neurons receive them. The alpha synapse and the Tsodyks-Markram synapse. Let's get started -with the leaky integrate-and-fire (LIF) model. -## The Leaky Integrate-and-Fire Model -The LIF model is an extension of the integrate-and-fire (IF) model. While the IF -model simply integrates input until it fires, the LIF model integrates input but -also decays towards an equilibrium potential. This means that inputs that arrive -in quick succession have a much higher chance to make the cell spike as opposed -to inputs that are further apart in time. 
The LIF is a more realistic neuron -model than the IF, because it is known from real neurons that the timing of -inputs is extremely relevant for their spiking. - -The LIF model has five parameters, `gL, EL, C, Vth, I` and we define it in the `lif(u, p, t)` function. - -```julia -using DifferentialEquations -using Plots -gr() - -function lif(u,p,t); - gL, EL, C, Vth, I = p - (-gL*(u-EL)+I)/C -end -``` - -Our system is described by one differential equation: `(-gL*(u-EL)+I)/C`, where -`u` is the voltage, `I` is the input, `gL` is the leak conductance, `EL` is the -equilibrium potential of the leak conductance and `C` is the membrane capacitance. -Generally, any change of the voltage is slowed down (filtered) by the membrane -capacitance. That's why we divide the whole equation by `C`. Without any external -input, the voltage always converges towards `EL`. If `u` is larger than `EL`, -`u` decreases until it is at `EL`. If `u` is smaller than `EL`, `u` increases -until it is at `EL`. The only other thing that can change the voltage is the -external input `I`. - -Our `lif` function requires a certain parameter structure because it will need -to be compatible with the `DifferentialEquations` interface. The input signature -is `lif(u, p, t)` where `u` is the voltage, `p` is the collection of the parameters -that describe the equation and `t` is time. You might wonder why time does not -show up in our equation, although we need to calculate the change in voltage -with respect to time. The ODE solver will take care of time for us. One of -the advantages of the ODE solver as opposed to calculating the change of -`u` in a for loop is that many ODE solver algorithms can dynamically adjust the -time step in a way that is efficient and accurate. - -One crucial thing is still missing however. This is supposed to be a model of -neural spiking, right? So we need a mechanism that recognizes the spike and -hyperpolarizes `u` in response. For this purpose we will use callbacks. 
-They can make discontinuous changes to the model when certain conditions are met. - -```julia -function thr(u,t,integrator) - integrator.u > integrator.p[4] -end - -function reset!(integrator) - integrator.u = integrator.p[2] -end - -threshold = DiscreteCallback(thr,reset!) -current_step= PresetTimeCallback([2,15],integrator -> integrator.p[5] += 210.0) -cb = CallbackSet(current_step,threshold) -``` - -Our condition is `thr(u,t,integrator)` and the condition kicks in when `integrator.u > integrator.p[4]` where `p[4]` is our threshold parameter `Vth`. Our effect of the condition is `reset!(integrator)`. It sets `u` back to the equilibrium potential `p[2]`. We then wrap both the condition and the effect into a `DiscreteCallback` called threshold. There is one more callback called `PresetTimeCallback` that is particularly useful. This one increases the input `p[5]` at `t=2` and `t=15` by `210.0`. Both callbacks are then combined into a `CallbackSet`. We are almost done to simulate our system we just need to put numbers on our initial voltage and parameters. - -```julia -u0 = -75 -tspan = (0.0, 40.0) -# p = (gL, EL, C, Vth, I) -p = [10.0, -75.0, 5.0, -55.0, 0] - -prob = ODEProblem(lif, u0, tspan, p, callback=cb) -``` - -Our initial voltage is `u0 = - 75`, which will be the same as our equilibrium potential, so we start at a stable point. Then we define the timespan we want to simulate. The time scale of the LIF as it is defined conforms roughly to milliseconds. Then we define our parameters as `p = [10.0, -75.0, 5.0, -55.0, 0]`. Remember that `gL, EL, C, Vth, I = p`. Finally we wrap everything into a call to `ODEProblem`. Can't forget the `CallbackSet`. With that our model is defined. Now we just need to solve it with a quick call to `solve`. - -```julia -sol = solve(prob) -``` - -First of all the `solve` output tells us if solving the system generally worked. In this case we know it worked because the return code (`retcode`) says `Success`. 
Then we get the numbers for the timestep and the solution to `u`. The raw numbers are not super interesting to let's plot our solution. - -```julia -plot(sol) -``` - -We see that the model is resting at `-75` while there is no input. At `t=2` the input increases by `210` and the model starts to spike. Spiking does not start immediately because the input first has to charge the membrane capacitance. Notice how once spiking starts it very quickly becomes extremely regular. Increasing the input again at `t=15` increases firing as we would expect but it is still extremely regular. This is one of the features of the LIF. The firing frequency is regular for constant input and a linear function of the input strength. There are ways to make LIF models less regular. For example we could use certain noise types at the input. We could also simulate a large number of LIF models and connect them synaptically. Instead of going into those topics, we will move on to the Izhikevich model, which is known for its ability to generate a large variety of spiking dynamics during constant inputs. - -## The Izhikevich Model -[The Izhikevich model](https://www.izhikevich.org/publications/spikes.htm) is a two-dimensional model of neuronal spiking. It was derived from a bifurcation analysis of a cortical neuron. Because it is two-dimensional it can generate much more complex spike dynamics than the LIF model. The kind of dynamics depend on the four parameters and the input `a, b, c, d, I = p`. All the concepts are the same as above, expect for some minor changes to our function definitions to accomodate for the second dimension. - -```julia -#Izhikevichch Model -using DifferentialEquations -using Plots - -function izh!(du,u,p,t); - a, b, c, d, I = p - - du[1] = 0.04*u[1]^2+5*u[1]+140-u[2]+I - du[2] = a*(b*u[1]-u[2]) -end -``` - -This is our Izhikevich model. There are two important changes here. First of all, note the additional input parameter `du`. This is a sequence of differences. 
`du[1]` corresponds to the voltage (the first dimension of the system) and `du[2]` corresponds to the second dimension. This second dimension is called `u` in the original Izhikevich work amnd it makes the notation a little annoying. In this tutorial I will generally stick to Julia and `DifferentialEquations` conventions as opposed to conventions of the specific models and `du` is commonly used. We will never define `du` ourselves outside of the function but the ODE solver will use it internally. The other change here is the `!` after our function name. This signifies that `du` will be preallocated before integration and then updated in-place, which saves a lot of allocation time. Now we just need our callbacks to take care of spikes and increase the input. - -```julia -function thr(u,t,integrator) - integrator.u[1] >= 30 -end - -function reset!(integrator) - integrator.u[1] = integrator.p[3] - integrator.u[2] += integrator.p[4] -end - -threshold = DiscreteCallback(thr,reset!) -current_step= PresetTimeCallback(50,integrator -> integrator.p[5] += 10) -cb = CallbackSet(current_step,threshold) -``` - -One key feature of the Izhikevich model is that each spike increases our second dimension `u[2]` by a preset amount `p[4]`. Between spikes `u[2]` decays to a value that depends on `p[1]` and `p[2]` and the equilibrium potential `p[3]`. Otherwise the code is not too different from the LIF model. We will again need to define our parameters and we are ready to simulate. - -```julia -p = [0.02, 0.2, -50, 2, 0] -u0 = [-65, p[2]*-65] -tspan = (0.0, 300) - -prob = ODEProblem(izh!, u0, tspan, p, callback=cb) -``` - -```julia -sol = solve(prob); -plot(sol, vars=1) -``` - -This spiking type is called chattering. It fires with intermittent periods of silence. Note that the input starts at `t=50` and remain constant for the duration of the simulation. 
One of mechanisms that sustains this type of firing is the spike induced hyperpolarization coming from our second dimension, so let's look at this variable. - -```julia -plot(sol, vars=2) -``` - -Our second dimension `u[2]` increases with every spike. When it becomes too large, the system cannot generate another spike until `u[2]` has decayed to a value small enough that spiking can resume. This process repeats. In this model, spiking is no longer regular like it was in the LIF. Here we have two frequencies, the frequency during the spiking state and the frequency between spiking states. The LIF model was dominated by one single frequency that was a function of the input strength. Let's see if we can generate another spiking type by changing the parameters. - -```julia -p = [0.02, 0.2, -65, 8, 0] -u0 = [-65, p[2]*-65] -tspan = (0.0, 300) - -prob = ODEProblem(izh!, u0, tspan, p, callback=cb) -sol = solve(prob); -plot(sol, vars=1) -``` - -This type is called regularly spiking and we created it just by lowering `p[3]` and increasing `p[4]`. Note that the type is called regularly spiking but it is not instantaneously regular. The instantenous frequency is higher in the beginning. This is called spike frequency adaptation and is a common property of real neurons. There are many more spike types that can be generated. Check out the [original Izhikevich work](https://www.izhikevich.org/publications/spikes.htm) and create your own favorite neuron! - -## Hodgkin-Huxley Model -The Hodgkin-Huxley (HH) model is our first biophysically realistic model. This means that all parameters and mechanisms of the model represent biological mechanisms. Specifically, the HH model simulates the ionic currents that depolarize and hyperpolarize a neuron during an action potential. This makes the HH model four-dimensional. Let's see how it looks. 
- -```julia -using DifferentialEquations -using Plots - -# Potassium ion-channel rate functions -alpha_n(v) = (0.02 * (v - 25.0)) / (1.0 - exp((-1.0 * (v - 25.0)) / 9.0)) -beta_n(v) = (-0.002 * (v - 25.0)) / (1.0 - exp((v - 25.0) / 9.0)) - -# Sodium ion-channel rate functions -alpha_m(v) = (0.182*(v + 35.0)) / (1.0 - exp((-1.0 * (v + 35.0)) / 9.0)) -beta_m(v) = (-0.124 * (v + 35.0)) / (1.0 - exp((v + 35.0) / 9.0)) - -alpha_h(v) = 0.25 * exp((-1.0 * (v + 90.0)) / 12.0) -beta_h(v) = (0.25 * exp((v + 62.0) / 6.0)) / exp((v + 90.0) / 12.0) - -function HH!(du,u,p,t); - gK, gNa, gL, EK, ENa, EL, C, I = p - v, n, m, h = u - - du[1] = (-(gK * (n^4.0) * (v - EK)) - (gNa * (m ^ 3.0) * h * (v - ENa)) - (gL * (v - EL)) + I) / C - du[2] = (alpha_n(v) * (1.0 - n)) - (beta_n(v) * n) - du[3] = (alpha_m(v) * (1.0 - m)) - (beta_m(v) * m) - du[4] = (alpha_h(v) * (1.0 - h)) - (beta_h(v) * h) -end -``` - -We have three different types of ionic conductances. Potassium, sodium and the leak. The potassium and sodium conducance are voltage gated. They increase or decrease depending on the voltage. In ion channel terms, open channels can transition to the closed state and closed channels can transition to the open state. It's probably easiest to start with the potassium current described by `gK * (n^4.0) * (EK - v)`. Here `gK` is the total possible conductance that we could reach if all potassium channels were open. If all channels were open, `n` would equal 1 which is usually not the case. The transition from open state to closed state is modeled in `alpha_n(v)` while the transition from closed to open is in `beta_n(v)`. Because potassium conductance is voltage gated, these transitions depend on `v`. The numbers in `alpha_n; beta_n` were calculated by Hodgkin and Huxley based on their extensive experiments on the squid giant axon. They also determined, that `n` needs to be taken to the power of 4 to correctly model the amount of open channels. 
- -The sodium current is not very different but it has two gating variables, `m, h` instead of one. The leak conductance gL has no gating variables because it is not voltage gated. Let's move on to the parameters. If you want all the details on the HH model you can find a great description [here](https://neuronaldynamics.epfl.ch/online/Ch2.S2.html). - -```julia -current_step= PresetTimeCallback(100,integrator -> integrator.p[8] += 1) - -# n, m & h steady-states -n_inf(v) = alpha_n(v) / (alpha_n(v) + beta_n(v)) -m_inf(v) = alpha_m(v) / (alpha_m(v) + beta_m(v)) -h_inf(v) = alpha_h(v) / (alpha_h(v) + beta_h(v)) - -p = [35.0, 40.0, 0.3, -77.0, 55.0, -65.0, 1, 0] -u0 = [-60, n_inf(-60), m_inf(-60), h_inf(-60)] -tspan = (0.0, 1000) - -prob = ODEProblem(HH!, u0, tspan, p, callback=current_step) -``` - -For the HH model we need only one callback. The PresetTimeCallback that starts our input current. We don't need to reset the voltage when it reaches threshold because the HH model has its own repolarization mechanism. That is the potassium current, which activates at large voltages and makes the voltage more negative. The three functions `n_inf; m_inf; h_inf` help us to find good initial values for the gating variables. Those functions tell us that the steady-state gating values should be for the initial voltage. The parameters were chosen in a way that the properties of the model roughly resemble that of a cortical pyramidal cell instead of the giant axon Hodgkin and Huxley were originally working on. - -```julia -sol = solve(prob); -plot(sol, vars=1) -``` - -That's some good regular voltage spiking. One of the cool things about a biophysically realistic model is that the gating variables tell us something about the mechanisms behind the action potential. You might have seen something like the following plot in a biology textbook. 
- -```julia -plot(sol, vars=[2,3,4], tspan=(105.0,130.0)) -``` - -So far we have only given our neurons very simple step inputs by simply changing -the number `I`. Actual neurons recieve their inputs mostly from chemical synapses. -They produce conductance changes with very complex structures. In the next -chapter we will try to incorporate a synapse into our HH model. - -## Alpha Synapse -One of the most simple synaptic mechanisms used in computational neuroscience -is the alpha synapse. When this mechanism is triggered, it causes an -instantanouse rise in conductance followed by an exponential decay. Let's -incorporate that into our HH model. - -```julia -function gSyn(max_gsyn, tau, tf, t); - if t-tf >= 0 - return max_gsyn * exp(-(t-tf)/tau) - else - return 0.0 - end -end -function HH!(du,u,p,t); - gK, gNa, gL, EK, ENa, EL, C, I, max_gSyn, ESyn, tau, tf = p - v, n, m, h = u - - ISyn = gSyn(max_gSyn, tau, tf, t) * (v - ESyn) - - du[1] = (-(gK * (n^4.0) * (v - EK)) - (gNa * (m ^ 3.0) * h * (v - ENa)) - (gL * (v - EL)) + I - ISyn) / C - du[2] = (alpha_n(v) * (1.0 - n)) - (beta_n(v) * n) - du[3] = (alpha_m(v) * (1.0 - m)) - (beta_m(v) * m) - du[4] = (alpha_h(v) * (1.0 - h)) - (beta_h(v) * h) -end -``` - -`gSyn` models the step to the maximum conductance and the following exponential decay with time constant `tau`. Of course we only want to integrate the conductance at and after time `tf`, the onset of the synaptic response. Before `tf`, `gSyn` returns zero. To convert the conductance to a current, we multiply by the difference between the current voltage and the synapses equilibrium voltage: `ISyn = gSyn(max_gSyn, tau, tf, t) * (v - ESyn)`. Later we will set the parameter `ESyn` to 0, making this synapse an excitatory synapse. Excitatory synapses have equilibrium potentials far above the resting potential. Let's see what our synapse does to the voltage of the cell. 
- -```julia -p = [35.0, 40.0, 0.3, -77.0, 55.0, -65.0, 1, 0, 0.008, 0, 20, 100] -tspan = (0.0, 200) -prob = ODEProblem(HH!, u0, tspan, p) -sol = solve(prob); -plot(sol, vars=1) -``` - -What you see here is called an excitatory postsynaptic potential (EPSP). It is the voltage response to a synaptic current. While our synaptic conductance rises instantly, the voltage response rises at a slower time course that is given by the membrane capacitance `C`. This particular voltage response is not strong enough to evoke spiking, so we say it is subthreshold. To get a suprathreshold response that evokes spiking we simply increase the parameter `max_gSyn` to increase the maximum conductance. - -```julia -p = [35.0, 40.0, 0.3, -77.0, 55.0, -65.0, 1, 0, 0.01, 0, 20, 100] -tspan = (0.0, 200) -prob = ODEProblem(HH!, u0, tspan, p) -sol = solve(prob); -plot!(sol, vars=1) -``` - -This plot shows both the subthreshold EPSP from above as well as the suprathreshold EPSP. Alpha synapses are nice because of their simplicity. Real synapses however, are extremely complex structures. One of the most important features of real synapses is that their maximum conductance is not the same on every event. The number and frequency of synaptic events changes the size of the maximum conductance in a dynamic way. While we usually avoid anatomical and biophysical details of real synapses, there is a widely used phenomenological way to capture those dynamics called the Tsodyks-Markram synapse. - -## Tsodyks-Markram Synapse -The Tsodyks-Markram synapse (TMS) is a dynamic system that models the changes of maximum conductance that occur between EPSPs at different frequencies. The single response is similar to the alpha synapse in that it rises instantaneously and decays exponentially. The maximum conductance it reaches depends on the event history. To simulate the TMS we need to incorporate three more dimensions, `u, R, gsyn` into our system. 
`u` decays towards 0, R decays towards 1 and gsyn decays towards 0 as it did with the alpha synapse. The crucial part of the TMS is in `epsp!`, where we handle the discontinuities when a synaptic event occurs. Instead of just setting `gsyn` to the maximum conductance `gmax`, we increment `gsyn` by a fraction of gmax that depends on the other two dynamic parameters. The frequency dependence comes from the size of the time constants `tau_u` and `tau_R`. Enough talk, let's simulate it. - -```julia -function HH!(du,u,p,t); - gK, gNa, gL, EK, ENa, EL, C, I, tau, tau_u, tau_R, u0, gmax, Esyn = p - v, n, m, h, u, R, gsyn = u - - du[1] = ((gK * (n^4.0) * (EK - v)) + (gNa * (m ^ 3.0) * h * (ENa - v)) + (gL * (EL - v)) + I + gsyn * (Esyn - v)) / C - du[2] = (alpha_n(v) * (1.0 - n)) - (beta_n(v) * n) - du[3] = (alpha_m(v) * (1.0 - m)) - (beta_m(v) * m) - du[4] = (alpha_h(v) * (1.0 - h)) - (beta_h(v) * h) - - # Synaptic variables - du[5] = -(u/tau_u) - du[6] = (1-R)/tau_R - du[7] = -(gsyn/tau) -end - -function epsp!(integrator); - integrator.u[5] += integrator.p[12] * (1 - integrator.u[5]) - integrator.u[7] += integrator.p[13] * integrator.u[5] * integrator.u[6] - integrator.u[6] -= integrator.u[5] * integrator.u[6] - -end - -epsp_ts= PresetTimeCallback(100:100:500, epsp!) - -p = [35.0, 40.0, 0.3, -77.0, 55.0, -65.0, 1, 0, 30, 1000, 50, 0.5, 0.005, 0] -u0 = [-60, n_inf(-60), m_inf(-60), h_inf(-60), 0.0, 1.0, 0.0] -tspan = (0.0, 700) -prob = ODEProblem(HH!, u0, tspan, p, callback=epsp_ts) -sol = solve(prob); -plot(sol, vars=1) -``` - -```julia -plot(sol, vars=7) -``` - -Both the voltage response as well as the conductances show what is called short-term facilitation. An increase in peak conductance over multiple synaptic events. Here the first event has a conductance of around 0.0025 and the last one of 0.004. 
We can plot the other two varialbes to see what underlies those dynamics - -```julia -plot(sol, vars=[5,6]) -``` - -Because of the time courses at play here, this facilitation is frequency dependent. If we increase the period between these events, facilitation does not occur. - -```julia -epsp_ts= PresetTimeCallback(100:1000:5100, epsp!) - -p = [35.0, 40.0, 0.3, -77.0, 55.0, -65.0, 1, 0, 30, 500, 50, 0.5, 0.005, 0] -u0 = [-60, n_inf(-60), m_inf(-60), h_inf(-60), 0.0, 1.0, 0.0] -tspan = (0.0, 5300) -prob = ODEProblem(HH!, u0, tspan, p, callback=epsp_ts) -sol = solve(prob); -plot(sol, vars=7) -``` - -```julia -plot(sol, vars=[5,6]) -``` - -We can also change these time constants such that the dynamics show short-term depression instead of facilitation. - -```julia -epsp_ts= PresetTimeCallback(100:100:500, epsp!) - -p = [35.0, 40.0, 0.3, -77.0, 55.0, -65.0, 1, 0, 30, 100, 1000, 0.5, 0.005, 0] -u0 = [-60, n_inf(-60), m_inf(-60), h_inf(-60), 0.0, 1.0, 0.0] -tspan = (0.0, 700) -prob = ODEProblem(HH!, u0, tspan, p, callback=epsp_ts) -sol = solve(prob); -plot(sol, vars=7) -``` - -```julia -plot(sol, vars=[5,6]) -``` - -Just changing those two time constants has changed the dynamics to short-term depression. This is still frequency dependent. Changing these parameters can generate a variety of different short-term dynamics. - -## Summary -That's it for now. Thanks for making it this far. If you want to learn more about neuronal dynamics, [this is a great resource](https://neuronaldynamics.epfl.ch/online/index.html). If you want to learn more about Julia check out the [official website](https://julialang.org/) and to learn more about the DifferentialEquations package you are in the right place, because this chapter is part of a [larger tutorial series about just that](https://github.com/SciML/SciMLTutorials.jl). 
- -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/models/Project.toml b/tutorials/models/Project.toml deleted file mode 100644 index 19b5b543..00000000 --- a/tutorials/models/Project.toml +++ /dev/null @@ -1,40 +0,0 @@ -[deps] -Catalyst = "479239e8-5488-4da2-87a7-35f2df7eef83" -DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" -DiffEqDevTools = "f3b72e0c-5b89-59e1-b016-84e28bfd966d" -DiffEqPhysics = "055956cb-9e8b-5191-98cc-73ae4a59e68a" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" -Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -Latexify = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" -NeuralPDE = "315f7962-48a3-4962-8226-d0f33b1235f0" -Optim = "429524aa-4258-5aef-a3af-852621145aeb" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -StochasticDiffEq = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -Catalyst = "5, 6.0" -DiffEqCallbacks = "2.13" -DiffEqDevTools = "2.22" -DiffEqPhysics = "3.5" -DifferentialEquations = "6.14" -Distributions = "0.23, 0.24, 0.25" -Flux = "0.10, 0.11, 0.12" -ForwardDiff = "0.10" -Latexify = "0.13, 0.14, 0.15" -ModelingToolkit = "3.10, 4.0, 5.0" -NLsolve = "4.4" -NeuralPDE = "2, 3.0" -Optim = "0.21, 0.22, 1.0" -OrdinaryDiffEq = "5.41" -Plots = "1.4" -RecursiveArrayTools = "2.5" -StochasticDiffEq = "6.23" diff --git a/tutorials/ode_extras/01-ModelingToolkit.jmd b/tutorials/ode_extras/01-ModelingToolkit.jmd 
deleted file mode 100644 index e1e6a409..00000000 --- a/tutorials/ode_extras/01-ModelingToolkit.jmd +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: ModelingToolkit.jl, An IR and Compiler for Scientific Models -author: Chris Rackauckas ---- - -A lot of people are building modeling languages for their specific domains. However, while the syntax my vary greatly between these domain-specific languages (DSLs), the internals of modeling frameworks are surprisingly similar: building differential equations, calculating Jacobians, etc. - -#### ModelingToolkit.jl is metamodeling systemitized - -After building our third modeling interface, we realized that this problem can be better approached by having a reusable internal structure which DSLs can target. This internal is ModelingToolkit.jl: an Intermediate Representation (IR) with a well-defined interface for defining system transformations and compiling to Julia functions for use in numerical libraries. Now a DSL can easily be written by simply defining the translation to ModelingToolkit.jl's primatives and querying for the mathematical quantities one needs. - -### Basic usage: defining differential equation systems, with performance! - -Let's explore the IR itself. ModelingToolkit.jl is friendly to use, and can used as a symbolic DSL in its own right. 
Let's define and solve the Lorenz differential equation system using ModelingToolkit to generate the functions: - -```{julia;line_width = 130} -using ModelingToolkit - -### Define a differential equation system - -@parameters t σ ρ β -@variables x(t) y(t) z(t) -@derivatives D'~t - -eqs = [D(x) ~ σ*(y-x), - D(y) ~ x*(ρ-z)-y, - D(z) ~ x*y - β*z] -de = ODESystem(eqs, t, [x,y,z], [σ,ρ,β]) -ode_f = ODEFunction(de) - -### Use in DifferentialEquations.jl - -using OrdinaryDiffEq -u₀ = ones(3) -tspan = (0.0,100.0) -p = [10.0,28.0,10/3] -prob = ODEProblem(ode_f,u₀,tspan,p) -sol = solve(prob,Tsit5()) - -using Plots -plot(sol,vars=(1,2,3)) -``` - -### ModelingToolkit is a compiler for mathematical systems - -At its core, ModelingToolkit is a compiler. It's IR is its type system, and its output are Julia functions (it's a compiler for Julia code to Julia code, written in Julia). - -DifferentialEquations.jl wants a function `f(u,p,t)` or `f(du,u,p,t)` for defining an ODE system, -so ModelingToolkit.jl builds both. First the out of place version: - -```{julia;line_width = 130} -generate_function(de)[1] -``` - -and the in-place: - -```{julia;line_width = 130} -generate_function(de)[2] -``` - -ModelingToolkit.jl can be used to calculate the Jacobian of the differential equation system: - -```{julia;line_width = 130} -jac = calculate_jacobian(de) -``` - -It will automatically generate functions for using this Jacobian within the stiff ODE solvers for faster solving: - -```{julia;line_width = 130} -jac_expr = generate_jacobian(de) -``` - -It can even do fancy linear algebra. Stiff ODE solvers need to perform an LU-factorization which is their most expensive part. But ModelingToolkit.jl can skip this operation and instead generate the analytical solution to a matrix factorization, and build a Julia function for directly computing the factorization, which is then optimized in LLVM compiler passes. 
- -```{julia;line_width = 130} -ModelingToolkit.generate_factorized_W(de)[1] -``` - -### Solving Nonlinear systems - -ModelingToolkit.jl is not just for differential equations. It can be used for any mathematical target that is representable by its IR. For example, let's solve a rootfinding problem `F(x)=0`. What we do is define a nonlinear system and generate a function for use in NLsolve.jl - -```{julia;line_width = 130} -@variables x y z -@parameters σ ρ β - -# Define a nonlinear system -eqs = [0 ~ σ*(y-x), - 0 ~ x*(ρ-z)-y, - 0 ~ x*y - β*z] -ns = NonlinearSystem(eqs, [x,y,z], [σ,ρ,β]) -nlsys_func = generate_function(ns) -``` - -We can then tell ModelingToolkit.jl to compile this function for use in NLsolve.jl, and then numerically solve the rootfinding problem: - -```{julia;line_width = 130} -nl_f = @eval eval(nlsys_func[2]) -# Make a closure over the parameters for for NLsolve.jl -f2 = (du,u) -> nl_f(du,u,(10.0,26.0,2.33)) - -using NLsolve -nlsolve(f2,ones(3)) -``` - -### Library of transformations on mathematical systems - -The reason for using ModelingToolkit is not just for defining performant Julia functions for solving systems, but also for performing mathematical transformations which may be required in order to numerically solve the system. For example, let's solve a third order ODE. The way this is done is by transforming the third order ODE into a first order ODE, and then solving the resulting ODE. This transformation is given by the `ode_order_lowering` function. - -```{julia;line_width = 130} -@derivatives D3'''~t -@derivatives D2''~t -@variables u(t), x(t) -eqs = [D3(u) ~ 2(D2(u)) + D(u) + D(x) + 1 - D2(x) ~ D(x) + 2] -de = ODESystem(eqs, t, [u,x], []) -de1 = ode_order_lowering(de) -``` - -```{julia;line_width = 130} -de1.eqs -``` - -This has generated a system of 5 first order ODE systems which can now be used in the ODE solvers. - -### Linear Algebra... for free? - -Let's take a look at how to extend ModelingToolkit.jl in new directions. 
Let's define a Jacobian just by using the derivative primatives by hand: - -```{julia;line_width = 130} -@parameters t σ ρ β -@variables x(t) y(t) z(t) -@derivatives D'~t Dx'~x Dy'~y Dz'~z -eqs = [D(x) ~ σ*(y-x), - D(y) ~ x*(ρ-z)-y, - D(z) ~ x*y - β*z] -J = [Dx(eqs[1].rhs) Dy(eqs[1].rhs) Dz(eqs[1].rhs) - Dx(eqs[2].rhs) Dy(eqs[2].rhs) Dz(eqs[2].rhs) - Dx(eqs[3].rhs) Dy(eqs[3].rhs) Dz(eqs[3].rhs)] -``` - -Notice that this writes the derivatives in a "lazy" manner. If we want to actually compute the derivatives, we can expand out those expressions: - -```{julia;line_width = 130} -J = expand_derivatives.(J) -``` - -Here's the magic of ModelingToolkit.jl: **Julia treats ModelingToolkit expressions like a Number, and so generic numerical functions are directly usable on ModelingToolkit expressions!** Let's compute the LU-factorization of this Jacobian we defined using Julia's Base linear algebra library. - -```{julia;line_width = 130} -using LinearAlgebra -luJ = lu(J,Val(false)) -``` - -```{julia;line_width = 130} -luJ.L -``` - -and the inverse? - -```{julia;line_width = 130} -invJ = inv(luJ) -``` - -#### Thus ModelingToolkit.jl can utilize existing numerical code on symbolic codes - -Let's follow this thread a little deeper. - -### Automatically convert numerical codes to symbolic - -Let's take someone's code written to numerically solve the Lorenz equation: - -```{julia;line_width = 130} -function lorenz(du,u,p,t) - du[1] = p[1]*(u[2]-u[1]) - du[2] = u[1]*(p[2]-u[3]) - u[2] - du[3] = u[1]*u[2] - p[3]*u[3] -end -``` - -Since ModelingToolkit can trace generic numerical functions in Julia, let's trace it with Operations. When we do this, it'll spit out a symbolic representation of their numerical code: - -```{julia;line_width = 130} -u = [x,y,z] -du = similar(u) -p = [σ,ρ,β] -lorenz(du,u,p,t) -du -``` - -We can then perform symbolic manipulations on their numerical code, and build a new numerical code that optimizes/fixes their original function! 
- -```{julia;line_width = 130} -J = [Dx(du[1]) Dy(du[1]) Dz(du[1]) - Dx(du[2]) Dy(du[2]) Dz(du[2]) - Dx(du[3]) Dy(du[3]) Dz(du[3])] -J = expand_derivatives.(J) -``` - -### Automated Sparsity Detection - -In many cases one has to speed up large modeling frameworks by taking into account sparsity. While ModelingToolkit.jl can be used to compute Jacobians, we can write a standard Julia function in order to get a spase matrix of expressions which automatically detects and utilizes the sparsity of their function. - -```{julia;line_width = 130} -using SparseArrays -function SparseArrays.SparseMatrixCSC(M::Matrix{T}) where {T<:ModelingToolkit.Expression} - idxs = findall(!iszero, M) - I = [i[1] for i in idxs] - J = [i[2] for i in idxs] - V = [M[i] for i in idxs] - return SparseArrays.sparse(I, J, V, size(M)...) -end -sJ = SparseMatrixCSC(J) -``` - -### Dependent Variables, Functions, Chain Rule - -"Variables" are overloaded. When you are solving a differential equation, the variable `u(t)` is actually a function of time. In order to handle these kinds of variables in a mathematically correct and extensible manner, the ModelingToolkit IR actually treats variables as functions, and constant variables are simply 0-ary functions (`t()`). - -We can utilize this idea to have parameters that are also functions. For example, we can have a parameter σ which acts as a function of 1 argument, and then utilize this function within our differential equations: - -```{julia;line_width = 130} -@parameters σ(..) -eqs = [D(x) ~ σ(t-1)*(y-x), - D(y) ~ x*(σ(t^2)-z)-y, - D(z) ~ x*y - β*z] -``` - -Notice that when we calculate the derivative with respect to `t`, the chain rule is automatically handled: - -```{julia;line_width = 130} -@derivatives Dₜ'~t -Dₜ(x*(σ(t^2)-z)-y) -expand_derivatives(Dₜ(x*(σ(t^2)-z)-y)) -``` - -### Hackability: Extend directly from the language - -ModelingToolkit.jl is written in Julia, and thus it can be directly extended from Julia itself. 
Let's define a normal Julia function and call it with a variable: - -```{julia;line_width = 130} -_f(x) = 2x + x^2 -_f(x) -``` - -Recall that when we do that, it will automatically trace this function and then build a symbolic expression. But what if we wanted our function to be a primative in the symbolic framework? This can be done by registering the function. - -```{julia;line_width = 130} -f(x) = 2x + x^2 -@register f(x) -``` - -Now this function is a new primitive: - -```{julia;line_width = 130} -f(x) -``` - -and we can now define derivatives of our function: - -```{julia;line_width = 130} -function ModelingToolkit.derivative(::typeof(f), args::NTuple{1,Any}, ::Val{1}) - 2 + 2args[1] -end -expand_derivatives(Dx(f(x))) -``` - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/02-feagin.jmd b/tutorials/ode_extras/02-feagin.jmd deleted file mode 100644 index 0e58f337..00000000 --- a/tutorials/ode_extras/02-feagin.jmd +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Feagin's Order 10, 12, and 14 Methods -author: Chris Rackauckas ---- - -DifferentialEquations.jl includes Feagin's explicit Runge-Kutta methods of orders 10/8, 12/10, and 14/12. These methods have such high order that it's pretty much required that one uses numbers with more precision than Float64. As a prerequisite reference on how to use arbitrary number systems (including higher precision) in the numerical solvers, please see the Solving Equations in With Chosen Number Types notebook. - -## Investigation of the Method's Error - -We can use Feagin's order 16 method as follows. Let's use a two-dimensional linear ODE. Like in the Solving Equations in With Chosen Number Types notebook, we change the initial condition to BigFloats to tell the solver to use BigFloat types. 
- -```julia -using DifferentialEquations -const linear_bigα = big(1.01) -f(u,p,t) = (linear_bigα*u) - -# Add analytical solution so that errors are checked -f_analytic(u0,p,t) = u0*exp(linear_bigα*t) -ff = ODEFunction(f,analytic=f_analytic) -prob = ODEProblem(ff,big(0.5),(0.0,1.0)) -sol = solve(prob,Feagin14(),dt=1//16,adaptive=false); -``` - -```julia -println(sol.errors) -``` - -Compare that to machine $\epsilon$ for Float64: - -```julia -eps(Float64) -``` - -The error for Feagin's method when the stepsize is 1/16 is 8 orders of magnitude below machine $\epsilon$! However, that is dependent on the stepsize. If we instead use adaptive timestepping with the default tolerances, we get - -```julia -sol =solve(prob,Feagin14()); -println(sol.errors); print("The length was $(length(sol))") -``` - -Notice that when the stepsize is much higher, the error goes up quickly as well. These super high order methods are best when used to gain really accurate approximations (using still modest timesteps). Some examples of where such precision is necessary is astrodynamics where the many-body problem is highly chaotic and thus sensitive to small errors. - -## Convergence Test - -The Order 14 method is awesome, but we need to make sure it's really that awesome. The following convergence test is used in the package tests in order to make sure the implementation is correct. Note that all methods have such tests in place. - -```julia -using DiffEqDevTools -dts = 1.0 ./ 2.0 .^(10:-1:4) -sim = test_convergence(dts,prob,Feagin14()) -``` - -For a view of what's going on, let's plot the simulation results. - -```julia -using Plots -gr() -plot(sim) -``` - -This is a clear trend indicating that the convergence is truly Order 14, which -is the estimated slope. 
- -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/03-ode_minmax.jmd b/tutorials/ode_extras/03-ode_minmax.jmd deleted file mode 100644 index 3463516d..00000000 --- a/tutorials/ode_extras/03-ode_minmax.jmd +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Finding Maxima and Minima of DiffEq Solutions -author: Chris Rackauckas ---- - -### Setup - -In this tutorial we will show how to use Optim.jl to find the maxima and minima of solutions. Let's take a look at the double pendulum: - -```julia -#Constants and setup -using OrdinaryDiffEq -initial = [0.01, 0.01, 0.01, 0.01] -tspan = (0.,100.) - -#Define the problem -function double_pendulum_hamiltonian(udot,u,p,t) - α = u[1] - lα = u[2] - β = u[3] - lβ = u[4] - udot .= - [2(lα-(1+cos(β))lβ)/(3-cos(2β)), - -2sin(α) - sin(α+β), - 2(-(1+cos(β))lα + (3+2cos(β))lβ)/(3-cos(2β)), - -sin(α+β) - 2sin(β)*(((lα-lβ)lβ)/(3-cos(2β))) + 2sin(2β)*((lα^2 - 2(1+cos(β))lα*lβ + (3+2cos(β))lβ^2)/(3-cos(2β))^2)] -end - -#Pass to solvers -poincare = ODEProblem(double_pendulum_hamiltonian, initial, tspan) -``` - -```julia -sol = solve(poincare, Tsit5()) -``` - -In time, the solution looks like: - -```julia -using Plots; gr() -plot(sol, vars=[(0,3),(0,4)], leg=false, plotdensity=10000) -``` - -while it has the well-known phase-space plot: - -```julia -plot(sol, vars=(3,4), leg=false) -``` - -### Local Optimization - -Let's fine out what some of the local maxima and minima are. Optim.jl can be used to minimize functions, and the solution type has a continuous interpolation which can be used. Let's look for the local optima for the 4th variable around `t=20`. Thus our optimization function is: - -```julia -f = (t) -> sol(t,idxs=4) -``` - -`first(t)` is the same as `t[1]` which transforms the array of size 1 into a number. 
`idxs=4` is the same as `sol(first(t))[4]` but does the calculation without a temporary array and thus is faster. To find a local minima, we can simply call Optim on this function. Let's find a local minimum: - -```julia -using Optim -opt = optimize(f,18.0,22.0) -``` - -From this printout we see that the minimum is at `t=18.63` and the value is `-2.79e-2`. We can get these in code-form via: - -```julia -println(opt.minimizer) -println(opt.minimum) -``` - -To get the maximum, we just minimize the negative of the function: - -```julia -f = (t) -> -sol(first(t),idxs=4) -opt2 = optimize(f,0.0,22.0) -``` - -Let's add the maxima and minima to the plots: - -```julia -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([opt.minimizer],[opt.minimum],label="Local Min") -scatter!([opt2.minimizer],[-opt2.minimum],label="Local Max") -``` - -Brent's method will locally minimize over the full interval. If we instead want a local maxima nearest to a point, we can use `BFGS()`. In this case, we need to optimize a vector `[t]`, and thus dereference it to a number using `first(t)`. - -```julia -f = (t) -> -sol(first(t),idxs=4) -opt = optimize(f,[20.0],BFGS()) -``` - -### Global Optimization - -If we instead want to find global maxima and minima, we need to look somewhere else. For this there are many choices. A pure Julia option is BlackBoxOptim.jl, but I will use NLopt.jl. 
Following the NLopt.jl tutorial but replacing their function with out own: - -```julia -import NLopt, ForwardDiff - -count = 0 # keep track of # function evaluations - -function g(t::Vector, grad::Vector) - if length(grad) > 0 - #use ForwardDiff for the gradients - grad[1] = ForwardDiff.derivative((t)->sol(first(t),idxs=4),t) - end - sol(first(t),idxs=4) -end -opt = NLopt.Opt(:GN_ORIG_DIRECT_L, 1) -NLopt.lower_bounds!(opt, [0.0]) -NLopt.upper_bounds!(opt, [40.0]) -NLopt.xtol_rel!(opt,1e-8) -NLopt.min_objective!(opt, g) -(minf,minx,ret) = NLopt.optimize(opt,[20.0]) -println(minf," ",minx," ",ret) -NLopt.max_objective!(opt, g) -(maxf,maxx,ret) = NLopt.optimize(opt,[20.0]) -println(maxf," ",maxx," ",ret) -``` - -```julia -plot(sol, vars=(0,4), plotdensity=10000) -scatter!([minx],[minf],label="Global Min") -scatter!([maxx],[maxf],label="Global Max") -``` - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/ode_extras/Project.toml b/tutorials/ode_extras/Project.toml deleted file mode 100644 index 2003fe26..00000000 --- a/tutorials/ode_extras/Project.toml +++ /dev/null @@ -1,22 +0,0 @@ -[deps] -DiffEqDevTools = "f3b72e0c-5b89-59e1-b016-84e28bfd966d" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" -NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" -Optim = "429524aa-4258-5aef-a3af-852621145aeb" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -DiffEqDevTools = "2.22" -DifferentialEquations = "6.14" -ModelingToolkit = "3.10, 4.0, 5.0" -NLopt = "0.6" -NLsolve = "4.4" -Optim = "0.22, 1.0" -OrdinaryDiffEq = "5.41" 
-Plots = "1.4" diff --git a/tutorials/perturbation/01-perturbation_algebraic.jmd b/tutorials/perturbation/01-perturbation_algebraic.jmd deleted file mode 100644 index 6e206741..00000000 --- a/tutorials/perturbation/01-perturbation_algebraic.jmd +++ /dev/null @@ -1,287 +0,0 @@ ---- -title: Mixed Symbolic/Numerical Methods for Perturbation Theory - Algebraic Equations -author: Shahriar Iravanian ---- - -## Background - -[**Symbolics.jl**](https://github.com/JuliaSymbolics/Symbolics.jl) is a fast and modern Computer Algebra System (CAS) written in the Julia Programming Language. It is an integral part of the [SciML](https://sciml.ai/) ecosystem of differential equation solvers and scientific machine learning packages. While **Symbolics.jl** is primarily designed for modern scientific computing (e.g., auto-differentiation, machine learning), it is a powerful CAS and can also be useful for *classic* scientific computing. One such application is using the *perturbation* theory to solve algebraic and differential equations. - -Perturbation methods are a collection of techniques to solve intractable problems that generally don't have a closed solution but depend on a tunable parameter and have closed or easy solutions for some values of the parameter. The main idea is to assume a solution as a power series in the tunable parameter (say $ϵ$), such that $ϵ = 0$ corresponds to an easy solution. - -We will discuss the general steps of the perturbation methods to solve algebraic (this tutorial) and differential equations (*Mixed Symbolic/Numerical Methods for Perturbation Theory - Differential Equations*). - -The hallmark of the perturbation method is the generation of long and convoluted intermediate equations, which are subjected to algorithmic and mechanical manipulations. Therefore, these problems are well suited for CAS. In fact, CAS softwares have been used to help with the perturbation calculations since the early 1970s. 
- -In this tutorial our goal is to show how to use a mix of symbolic manipulations (**Symbolics.jl**) and numerical methods (**DifferentialEquations.jl**) to solve simple perturbation problems. - -## Solving the Quintic - -We start with the "hello world!" analog of the perturbation problems, solving the quintic (fifth-order) equations. We want to find a real valued $x$ such that $x^5 + x = 1$. According to the Abel's theorem, a general quintic equation does not have a closed form solution. Of course, we can easily solve this equation numerically; for example, by using the Newton's method. We use the following implementation of the Newton's method: - -```julia -using Symbolics, SymbolicUtils - -function solve_newton(f, x, x₀; abstol=1e-8, maxiter=50) - xₙ = Float64(x₀) - fₙ₊₁ = x - f / Symbolics.derivative(f, x) - - for i = 1:maxiter - xₙ₊₁ = substitute(fₙ₊₁, Dict(x => xₙ)) - if abs(xₙ₊₁ - xₙ) < abstol - return xₙ₊₁ - else - xₙ = xₙ₊₁ - end - end - return xₙ₊₁ -end -``` - -In this code, `Symbolics.derivative(eq, x)` does exactly what it names implies: it calculates the symbolic derivative of `eq` (a **Symbolics.jl** expression) with respect to `x` (a **Symbolics.jl** variable). We use `Symbolics.substitute(eq, D)` to evaluate the update formula by substituting variables or sub-expressions (defined in a dictionary `D`) in `eq`. It should be noted that `substitute` is the workhorse of our code and will be used multiple times in the rest of these tutorials. `solve_newton` is written with simplicity and clarity, and not performance, in mind but suffices for our purpose. - -Let's go back to our quintic. We can define a Symbolics variable as `@variables x` and then solve the equation `solve_newton(x^5 + x - 1, x, 1.0)` (here, `x₀ = 0` is our first guess). The answer is 0.7549. Now, let's see how we can solve the same problem using the perturbation methods. - -We introduce a tuning parameter $\epsilon$ into our equation: $x^5 + \epsilon x = 1$. 
If $\epsilon = 1$, we get our original problem. For $\epsilon = 0$, the problem transforms to an easy one: $x^5 = 1$ which has an exact real solution $x = 1$ (and four complex solutions which we ignore here). We expand $x$ as a power series on $\epsilon$: - -$$ - x(\epsilon) = a_0 + a_1 \epsilon + a_2 \epsilon^2 + O(\epsilon^3) - \,. -$$ - -$a_0$ is the solution of the easy equation, therefore $a_0 = 1$. Substituting into the original problem, - -$$ - (a_0 + a_1 \epsilon + a_2 \epsilon^2)^5 + \epsilon (a_0 + a_1 \epsilon + a_2 \epsilon^2) - 1 = 0 - \,. -$$ - -Expanding the equations, we get -$$ - \epsilon (1 + 5 a_1) + \epsilon^2 (a_1 + 5 a_2 + 10 a1_2) + 𝑂(\epsilon^3) = 0 - \,. -$$ - -This equation should hold for each power of $\epsilon$. Therefore, - -$$ - 1 + 5 a_1 = 0 - \,, -$$ - -and - -$$ - a_1 + 5 a_2 + 10 a_1^2 = 0 - \,. -$$ - -This system of equations does not initially seem to be linear because of the presence of terms like $10 a_1^2$, but upon closer inspection is found to be in fact linear (this is a feature of the perturbation methods). In addition, the system is in a triangular form, meaning the first equation depends only on $a_1$, the second one on $a_1$ and $a_2$, such that we can replace the result of $a_1$ from the first one into the second equation and remove the non-linear term. We solve the first equation to get $a_1 = -\frac{1}{5}$. Substituting in the second one and solve for $a_2$: - -$$ - a_2 = \frac{(-\frac{1}{5} + 10(-(\frac{1}{5})²)}{5} = -\frac{1}{25} - \,. -$$ - -Finally, - -$$ - x(\epsilon) = 1 - \frac{\epsilon}{5} - \frac{\epsilon^2}{25} + O(\epsilon^3) - \,. -$$ - -Solving the original problem, $x(1) = 0.76$, compared to 0.7548 calculated numerically. We can improve the accuracy by including more terms in the expansion of $x$. However, the calculations, while straightforward, become messy and intractable to do manually very quickly. This is why a CAS is very helpful to solve perturbation problems. 
- -Now, let's see how we can do these calculations in Julia. Let $n$ be the order of the expansion. We start by defining the symbolic variables: - -```julia -n = 2 -@variables ϵ a[1:n] -``` - -Then, we define - -```julia -x = 1 + a[1]*ϵ + a[2]*ϵ^2 -``` - -The next step is to substitute `x` in the problem equation - -```julia - eq = x^5 + ϵ*x - 1 -``` - -The expanded form of `eq` is - -```julia -expand(eq) -``` - -We need a way to get the coefficients of different powers of `ϵ`. Function `collect_powers(eq, x, ns)` returns the powers of variable `x` in expression `eq`. Argument `ns` is the range of the powers. - -```julia -function collect_powers(eq, x, ns; max_power=100) - eq = substitute(expand(eq), Dict(x^j => 0 for j=last(ns)+1:max_power)) - - eqs = [] - for i in ns - powers = Dict(x^j => (i==j ? 1 : 0) for j=1:last(ns)) - push!(eqs, substitute(eq, powers)) - end - eqs -end -``` - -To return the coefficients of $ϵ$ and $ϵ^2$ in `eq`, we can write - -```julia -eqs = collect_powers(eq, ϵ, 1:2) -``` - -A few words on how `collect_powers` works, It uses `substitute` to find the coefficient of a given power of `x` by passing a `Dict` with all powers of `x` set to 0, except the target power which is set to 1. For example, the following expression returns the coefficient of `ϵ^2` in `eq`, - -```julia -substitute(expand(eq), Dict( - ϵ => 0, - ϵ^2 => 1, - ϵ^3 => 0, - ϵ^4 => 0, - ϵ^5 => 0, - ϵ^6 => 0, - ϵ^7 => 0, - ϵ^8 => 0) -) -``` - -Back to our problem. Having the coefficients of the powers of `ϵ`, we can set each equation in `eqs` to 0 (remember, we rearrange the problem such that `eq` is 0) and solve the system of linear equations to find the numerical values of the coefficients. **Symbolics.jl** has a function `Symbolics.solve_for` that can solve systems of linear equations. However, the presence of higher order terms in `eqs` prevents `Symbolics.solve_for(eqs .~ 0, a)` from workings properly. 
Instead, we can exploit the fact that our system is in a triangular form and start by solving `eqs[1]` for `a₁` and then substitute this in `eqs[2]` and solve for `a₂` (as continue the same process for higher order terms). This *cascading* process is done by function `solve_coef(eqs, ps)`: - -```julia -function solve_coef(eqs, ps) - vals = Dict() - - for i = 1:length(ps) - eq = substitute(eqs[i], vals) - vals[ps[i]] = Symbolics.solve_for(eq ~ 0, ps[i]) - end - vals -end -``` - -Here, `eqs` is an array of expressions (assumed to be equal to 0) and `ps` is an array of variables. The result is a dictionary of *variable* => *value* pairs. We apply `solve_coef` to `eqs` to get the numerical values of the parameters: - -```julia -solve_coef(eqs, a) -``` - -Finally, we substitute back the values of `a` in the definition of `x` as a function of `𝜀`. Note that `𝜀` is a number (usually Float64), whereas `ϵ` is a symbolic variable. - -```julia -X = 𝜀 -> 1 + a[1]*𝜀 + a[2]*𝜀^2 -``` - -Therefore, the solution to our original problem becomes `X(1)`, which is equal to 0.76. We can use larger values of `n` to improve the accuracy of estimations. - -| n | x | -|---|----------------| -|1 |0.8 | -|2 |0.76| -|3 |0.752| -|4 |0.752| -|5 |0.7533| -|6 |0.7543| -|7 |0.7548| -|8 |0.7550| - -Remember the numerical value is 0.7549. The two functions `collect_powers` and `solve_coef(eqs, a)` are used in all the examples in this and the next tutorial. - -## Solving the Kepler's Equation - -Historically, the perturbation methods were first invented to solve orbital calculations of the Moon and the planets. In homage to this history, our second example has a celestial theme. Our goal is solve the Kepler's equation: - -$$ - E - e\sin(E) = M - \,. -$$ - -where $e$ is the *eccentricity* of the elliptical orbit, $M$ is the *mean anomaly*, and $E$ (unknown) is the *eccentric anomaly* (the angle between the position of a planet in an elliptical orbit and the point of periapsis). 
This equation is central to solving two-body Keplerian orbits. - -Similar to the first example, it is easy to solve this problem using the Newton's method. For example, let $e = 0.01671$ (the eccentricity of the Earth) and $M = \pi/2$. We have `solve_newton(x - e*sin(x) - M, x, M)` equals to 1.5875 (compared to π/2 = 1.5708). Now, we try to solve the same problem using the perturbation techniques (see function `test_kepler`). - -For $e = 0$, we get $E = M$. Therefore, we can use $e$ as our perturbation parameter. For consistency with other problems, we also rename $e$ to $\epsilon$ and $E$ to $x$. - -From here on, we use the helper function `def_taylor` to define Taylor's series by calling it as `x = def_taylor(ϵ, a, 1)`, where the arguments are, respectively, the perturbation variable, an array of coefficients (starting from the coefficient of $\epsilon^1$), and an optional constant term. - -```julia -def_taylor(x, ps) = sum([a*x^i for (i,a) in enumerate(ps)]) -def_taylor(x, ps, p₀) = p₀ + def_taylor(x, ps) -``` - -We start by defining the variables (assuming `n = 3`): - -```julia -n = 3 -@variables ϵ M a[1:n] -x = def_taylor(ϵ, a, M) -``` - -We further simplify by substituting `sin` with its power series using the `expand_sin` helper function: - -```julia -expand_sin(x, n) = sum([(isodd(k) ? -1 : 1)*(-x)^(2k-1)/factorial(2k-1) for k=1:n]) -``` - -To test, - -```julia -expand_sin(0.1, 10) ≈ sin(0.1) -``` - -The problem equation is - -```julia -eq = x - ϵ * expand_sin(x, n) - M -``` - -We follow the same process as the first example. We collect the coefficients of the powers of `ϵ` - -```julia -eqs = collect_powers(eq, ϵ, 1:n) -``` - -and then solve for `a`: - -```julia -vals = solve_coef(eqs, a) -``` - -Finally, we substitute `vals` back in `x`: - -```julia -x′ = substitute(x, vals) -X = (𝜀, 𝑀) -> substitute(x′, Dict(ϵ => 𝜀, M => 𝑀)) -X(0.01671, π/2) -``` - -The result is 1.5876, compared to the numerical value of 1.5875. 
It is customary to order `X` based on the powers of `𝑀` instead of `𝜀`. We can calculate this series as `collect_powers(sol, M, 0:3) -`. The result (after manual cleanup) is - -``` -(1 + 𝜀 + 𝜀^2 + 𝜀^3)*𝑀 -- (𝜀 + 4*𝜀^2 + 10*𝜀^3)*𝑀^3/6 -+ (𝜀 + 16*𝜀^2 + 91*𝜀^3)*𝑀^5/120 -``` - -Comparing the formula to the one for 𝐸 in the [Wikipedia article on the Kepler's equation](https://en.wikipedia.org/wiki/Kepler%27s_equation): - -$$ - E = \frac{1}{1-\epsilon}M - -\frac{\epsilon}{(1-\epsilon)^4} \frac{M^3}{3!} + \frac{(9\epsilon^2 - + \epsilon)}{(1-\epsilon)^7}\frac{M^5}{5!}\cdots -$$ - -The first deviation is in the coefficient of $\epsilon^3 M^5$. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/perturbation/02-perturbation_differential.jmd b/tutorials/perturbation/02-perturbation_differential.jmd deleted file mode 100644 index 0eab7457..00000000 --- a/tutorials/perturbation/02-perturbation_differential.jmd +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: Mixed Symbolic/Numerical Methods for Perturbation Theory - Differential Equations -author: Shahriar Iravanian ---- - -## Prelims - -In the previous tutorial, *Mixed Symbolic/Numerical Methods for Perturbation Theory - Algebraic Equations*, we discussed how to solve algebraic equations using **Symbolics.jl**. Here, our goal is to extend the method to differential equations. First, we import the following helper functions that were introduced in *Mixed Symbolic/Numerical Methods for Perturbation Theory - Algebraic Equations*. - -```julia -using Symbolics, SymbolicUtils - -def_taylor(x, ps) = sum([a*x^i for (i,a) in enumerate(ps)]) -def_taylor(x, ps, p₀) = p₀ + def_taylor(x, ps) - -function collect_powers(eq, x, ns; max_power=100) - eq = substitute(expand(eq), Dict(x^j => 0 for j=last(ns)+1:max_power)) - - eqs = [] - for i in ns - powers = Dict(x^j => (i==j ? 
1 : 0) for j=1:last(ns)) - push!(eqs, substitute(eq, powers)) - end - eqs -end - -function solve_coef(eqs, ps) - vals = Dict() - - for i = 1:length(ps) - eq = substitute(eqs[i], vals) - vals[ps[i]] = Symbolics.solve_for(eq ~ 0, ps[i]) - end - vals -end -``` - -## The Trajectory of a Ball! - -In the first two examples, we applied the perturbation method to algebraic problems. However, the main power of the perturbation method is to solve differential equations (usually ODEs, but also occasionally PDEs). Surprisingly, the main procedure developed to solve algebraic problems works well for differential equations. In fact, we will use the same two helper functions, `collect_powers` and `solve_coef`. The main difference is in the way we expand the dependent variables. For algebraic problems, the coefficients of $\epsilon$ are constants; whereas, for differential equations, they are functions of the dependent variable (usually time). - -As the first ODE example, we have chosen a simple and well-behaved problem, which is a variation of a standard first-year physics problem: what is the trajectory of an object (say, a ball or a rocket) thrown vertically at velocity $v$ from the surface of a planet? Assuming a constant acceleration of gravity, $g$, every burgeoning physicist knows the answer: $x(t) = x(0) + vt - \frac{1}{2}gt^2$. However, what happens if $g$ is not constant? Specifically, $g$ is inversely proportional to the distant from the center of the planet. If $v$ is large and the projectile travels a large fraction of the radius of the planet, the assumption of constant gravity does not hold anymore. However, unless $v$ is large compared to the escape velocity, the correction is usually small. After simplifications and change of variables to dimensionless, the problem becomes - -$$ - \ddot{x}(t) = -\frac{1}{(1 + \epsilon x(t))^2} - \,, -$$ - -with the initial conditions $x(0) = 0$, and $\dot{x}(0) = 1$. 
Note that for $\epsilon = 0$, this equation transforms back to the standard one. Let's start with defining the variables - -```julia -n = 2 -@variables ϵ t y[0:n](t) ∂∂y[0:n] -``` - -Next, we define $x$. - -```julia -x = def_taylor(ϵ, y[2:end], y[1]) -``` - -We need the second derivative of `x`. It may seem that we can do this using `Differential(t)`; however, this operation needs to wait for a few steps because we need to manipulate the differentials as separate variables. Instead, we define dummy variables `∂∂y` as the placeholder for the second derivatives and define - -```julia -∂∂x = def_taylor(ϵ, ∂∂y[2:end], ∂∂y[1]) -``` - -as the second derivative of `x`. After rearrangement, our governing equation is $\ddot{x}(t)(1 + \epsilon x(t))^{-2} + 1 = 0$, or - -```julia -eq = ∂∂x * (1 + ϵ*x)^2 + 1 -``` - -The next two steps are the same as the ones for algebraic equations (note that we pass `0:n` to `collect_powers` because the zeroth order term is needed here) - -```julia -eqs = collect_powers(eq, ϵ, 0:n) -``` - -and, - -```julia -vals = solve_coef(eqs, ∂∂y) -``` - -Our system of ODEs is forming. Now is the time to convert `∂∂`s to the correct **Symbolics.jl** form by substitution: - -```julia -D = Differential(t) -subs = Dict(∂∂y[i] => D(D(y[i])) for i in eachindex(y)) -eqs = [substitute(first(v), subs) ~ substitute(last(v), subs) for v in vals] -``` - -We are nearly there! From this point on, the rest is standard ODE solving procedures. Potentially we can use a symbolic ODE solver to find a closed form solution to this problem. However, **Symbolics.jl** currently does not support this functionality. Instead, we solve the problem numerically. We form an `ODESystem`, lower the order (convert second derivatives to first), generate an `ODEProblem` (after passing the correct initial conditions), and, finally, solve it. 
- -```julia -using ModelingToolkit, DifferentialEquations - -sys = ODESystem(eqs, t) -sys = ode_order_lowering(sys) -states(sys) -``` - -```julia -# the initial conditions -# everything is zero except the initial velocity -u0 = zeros(2n+2) -u0[3] = 1.0 # y₀ˍt - -prob = ODEProblem(sys, u0, (0, 3.0)) -sol = solve(prob; dtmax=0.01) -``` - -Finally, we calculate the solution to the problem as a function of `ϵ` by substituting the solution to the ODE system back into the defining equation for `x`. Note that `𝜀` is a number, compared to `ϵ`, which is a symbolic variable. - -```julia -X = 𝜀 -> sum([𝜀^(i-1) * sol[y[i]] for i in eachindex(y)]) -``` - -Using `X`, we can plot the trajectory for a range of $𝜀$s. - -```julia -using Plots - -plot(sol.t, hcat([X(𝜀) for 𝜀 = 0.0:0.1:0.5]...)) -``` - -As expected, as `𝜀` becomes larger (meaning the gravity is less with altitude), the object goes higher and stays up for a longer duration. Of course, we could have solved the problem directly using as ODE solver. One of the benefits of the perturbation method is that we need to run the ODE solver only once and then can just calculate the answer for different values of `𝜀`; whereas, if we had used the direct method, we would need to run the solver once for each value of `𝜀`. - -## A Weakly Nonlinear Oscillator - -For the next example, we have chosen a simple example from a very important class of problems, the nonlinear oscillators. As we will see, perturbation theory has difficulty providing a good solution to this problem, but the process is instructive. This example closely follows the chapter 7.6 of *Nonlinear Dynamics and Chaos* by Steven Strogatz. - -The goal is to solve $\ddot{x} + 2\epsilon\dot{x} + x = 0$, where the dot signifies time-derivatives and the initial conditions are $x(0) = 0$ and $\dot{x}(0) = 1$. If $\epsilon = 0$, the problem reduces to the simple linear harmonic oscillator with the exact solution $x(t) = \sin(t)$. We follow the same steps as the previous example. 
- -```julia -n = 2 -@variables ϵ t y[0:n](t) ∂y[0:n] ∂∂y[0:n] -x = def_taylor(ϵ, y[2:end], y[1]) -∂x = def_taylor(ϵ, ∂y[2:end], ∂y[1]) -∂∂x = def_taylor(ϵ, ∂∂y[2:end], ∂∂y[1]) -``` - -This time we also need the first derivative terms. Continuing, - -```julia -eq = ∂∂x + 2*ϵ*∂x + x -eqs = collect_powers(eq, ϵ, 0:n) -vals = solve_coef(eqs, ∂∂y) -``` - -Next, we need to replace `∂`s and `∂∂`s with their **Symbolics.jl** counterparts: - -```julia -D = Differential(t) -subs1 = Dict(∂y[i] => D(y[i]) for i in eachindex(y)) -subs2 = Dict(∂∂y[i] => D(D(y[i])) for i in eachindex(y)) -subs = subs1 ∪ subs2 -eqs = [substitute(first(v), subs) ~ substitute(last(v), subs) for v in vals] -``` - -We continue with converting 'eqs' to an `ODEProblem`, solving it, and finally plot the results against the exact solution to the original problem, which is $x(t, \epsilon) = (1 - \epsilon)^{-1/2} e^{-\epsilon t} \sin((1- \epsilon^2)^{1/2}t)$, - -```julia -sys = ODESystem(eqs, t) -sys = ode_order_lowering(sys) -``` - -```julia -# the initial conditions -u0 = zeros(2n+2) -u0[3] = 1.0 # y₀ˍt - -prob = ODEProblem(sys, u0, (0, 50.0)) -sol = solve(prob; dtmax=0.01) - -X = 𝜀 -> sum([𝜀^(i-1) * sol[y[i]] for i in eachindex(y)]) -T = sol.t -Y = 𝜀 -> exp.(-𝜀*T) .* sin.(sqrt(1 - 𝜀^2)*T) / sqrt(1 - 𝜀^2) # exact solution - -plot(sol.t, [Y(0.1), X(0.1)]) -``` - -The figure is similar to Figure 7.6.2 in *Nonlinear Dynamics and Chaos*. The two curves fit well for the first couple of cycles, but then the perturbation method curve diverges from the true solution. The main reason is that the problem has two or more time-scales that introduce secular terms in the solution. One solution is to explicitly account for the two time scales and use an analytic method called *two-timing*. 
- -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/perturbation/Project.toml b/tutorials/perturbation/Project.toml deleted file mode 100644 index 29dbc2f3..00000000 --- a/tutorials/perturbation/Project.toml +++ /dev/null @@ -1,2 +0,0 @@ -[deps] -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" diff --git a/tutorials/type_handling/01-number_types.jmd b/tutorials/type_handling/01-number_types.jmd deleted file mode 100644 index 86edacea..00000000 --- a/tutorials/type_handling/01-number_types.jmd +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Solving Equations in With Julia-Defined Types -author: Chris Rackauckas ---- - -One of the nice things about DifferentialEquations.jl is that it is designed with Julia's type system in mind. What this means is, if you have properly defined a Number type, you can use this number type in DifferentialEquations.jl's algorithms! [Note that this is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as ODE.jl, Sundials.jl, and ODEInterface.jl are not compatible with some number systems.] - -DifferentialEquations.jl determines the numbers to use in its solvers via the types that are designated by `tspan` and the initial condition of the problem. It will keep the time values in the same type as tspan, and the solution values in the same type as the initial condition. [Note that adaptive timestepping requires that the time type is compaible with `sqrt` and `^` functions. Thus dt cannot be Integer or numbers like that if adaptive timestepping is chosen]. - -Let's solve the linear ODE first define an easy way to get ODEProblems for the linear ODE: - -```julia -using DifferentialEquations -f = (u,p,t) -> (p*u) -prob_ode_linear = ODEProblem(f,1/2,(0.0,1.0),1.01); -``` - -First let's solve it using Float64s. 
To do so, we just need to set u0 to a Float64 (which is done by the default) and dt should be a float as well. - -```julia -prob = prob_ode_linear -sol =solve(prob,Tsit5()) -println(sol) -``` - -Notice that both the times and the solutions were saved as Float64. Let's change the time to use rational values. Rationals are not compatible with adaptive time stepping since they do not have an L2 norm (this can be worked around by defining `internalnorm`, but rationals already explode in size!). To account for this, let's turn off adaptivity as well: - -```julia -prob = ODEProblem(f,1/2,(0//1,1//1),101//100); -sol = solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol) -``` - -Now let's do something fun. Let's change the solution to use `Rational{BigInt}` and print out the value at the end of the simulation. To do so, simply change the definition of the initial condition. - -```julia -prob = ODEProblem(f,BigInt(1)//BigInt(2),(0//1,1//1),101//100); -sol =solve(prob,RK4(),dt=1//2^(6),adaptive=false) -println(sol[end]) -``` - -That's one huge fraction! - -## Other Compatible Number Types - -#### BigFloats - -```julia -prob_ode_biglinear = ODEProblem(f,big(1.0)/big(2.0),(big(0.0),big(1.0)),big(1.01)) -sol =solve(prob_ode_biglinear,Tsit5()) -println(sol[end]) -``` - -#### DoubleFloats.jl - -There's are Float128-like types. Higher precision, but fixed and faster than arbitrary precision. - -```julia -using DoubleFloats -prob_ode_doublelinear = ODEProblem(f,Double64(1)/Double64(2),(Double64(0),Double64(1)),Double64(1.01)) -sol =solve(prob_ode_doublelinear,Tsit5()) -println(sol[end]) -``` - -#### ArbFloats - -These high precision numbers which are much faster than Bigs for less than 500-800 bits of accuracy. 
- -```julia -using ArbNumerics -prob_ode_arbfloatlinear = ODEProblem(f,ArbFloat(1)/ArbFloat(2),(ArbFloat(0.0),ArbFloat(1.0)),ArbFloat(1.01)) -sol =solve(prob_ode_arbfloatlinear,Tsit5()) -println(sol[end]) -``` - -## Incompatible Number Systems - -#### DecFP.jl - -Next let's try DecFP. DecFP is a fixed-precision decimals library which is made to give both performance but known decimals of accuracy. Having already installed DecFP with `]add DecFP`, I can run the following: - -```julia -using DecFP -prob_ode_decfplinear = ODEProblem(f,Dec128(1)/Dec128(2),(Dec128(0.0),Dec128(1.0)),Dec128(1.01)) -sol =solve(prob_ode_decfplinear,Tsit5()) -println(sol[end]); println(typeof(sol[end])) -``` - -#### Decimals.jl - -Install with `]add Decimals`. - -```julia -using Decimals -prob_ode_decimallinear = ODEProblem(f,[decimal("1.0")]./[decimal("2.0")],(0//1,1//1),decimal(1.01)) -sol =solve(prob_ode_decimallinear,RK4(),dt=1/2^(6)) #Fails -println(sol[end]); println(typeof(sol[end])) -``` - -At the time of writing this, Decimals are not compatible. This is not on DifferentialEquations.jl's end, it's on partly on Decimal's end since it is not a subtype of Number. Thus it's not recommended you use Decimals with DifferentialEquations.jl - -## Conclusion - -As you can see, DifferentialEquations.jl can use arbitrary Julia-defined number systems in its arithmetic. If you need 128-bit floats, i.e. a bit more precision but not arbitrary, DoubleFloats.jl is a very good choice! For arbitrary precision, ArbNumerics are the most feature-complete and give great performance compared to BigFloats, and thus I recommend their use when high-precision (less than 512-800 bits) is required. DecFP is a great library for high-performance decimal numbers and works well as well. Other number systems could use some modernization. 
- -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/02-uncertainties.jmd b/tutorials/type_handling/02-uncertainties.jmd deleted file mode 100644 index 1ef5dc24..00000000 --- a/tutorials/type_handling/02-uncertainties.jmd +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Numbers with Uncertainties -author: Mosè Giordano, Chris Rackauckas ---- - -The result of a measurement should be given as a number with an attached uncertainties, besides the physical unit, and all operations performed involving the result of the measurement should propagate the uncertainty, taking care of correlation between quantities. - -There is a Julia package for dealing with numbers with uncertainties: [`Measurements.jl`](https://github.com/JuliaPhysics/Measurements.jl). Thanks to Julia's features, `DifferentialEquations.jl` easily works together with `Measurements.jl` out-of-the-box. - -This notebook will cover some of the examples from the tutorial about classical Physics. - -## Caveat about `Measurement` type - -Before going on with the tutorial, we must point up a subtlety of `Measurements.jl` that you should be aware of: - -```julia -using Measurements - -5.23 ± 0.14 === 5.23 ± 0.14 -``` - -```julia -(5.23± 0.14) - (5.23 ± 0.14) -``` - -```julia -(5.23 ± 0.14) / (5.23 ± 0.14) -``` - -The two numbers above, even though have the same nominal value and the same uncertainties, are actually two different measurements that only by chance share the same figures and their difference and their ratio have a non-zero uncertainty. It is common in physics to get very similar, or even equal, results for a repeated measurement, but the two measurements are not the same thing. 
- -Instead, if you have *one measurement* and want to perform some operations involving it, you have to assign it to a variable: - -```julia -x = 5.23 ± 0.14 -x === x -``` - -```julia -x - x -``` - -```julia -x / x -``` - -## Radioactive Decay of Carbon-14 - -The rate of decay of carbon-14 is governed by a first order linear ordinary differential equation - -$$\frac{\mathrm{d}u(t)}{\mathrm{d}t} = -\frac{u(t)}{\tau}$$ - -where $\tau$ is the mean lifetime of carbon-14, which is related to the half-life $t_{1/2} = (5730 \pm 40)$ years by the relation $\tau = t_{1/2}/\ln(2)$. - -```julia -using DifferentialEquations, Measurements, Plots - -# Half-life and mean lifetime of radiocarbon, in years -t_12 = 5730 ± 40 -τ = t_12 / log(2) - -#Setup -u₀ = 1 ± 0 -tspan = (0.0, 10000.0) - -#Define the problem -radioactivedecay(u,p,t) = - u / τ - -#Pass to solver -prob = ODEProblem(radioactivedecay, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-8) - -# Analytic solution -u = exp.(- sol.t / τ) - -plot(sol.t, sol.u, label = "Numerical", xlabel = "Years", ylabel = "Fraction of Carbon-14") -plot!(sol.t, u, label = "Analytic") -``` - -The two curves are perfectly superimposed, indicating that the numerical solution matches the analytic one. We can check that also the uncertainties are correctly propagated in the numerical solution: - -```julia -println("Quantity of carbon-14 after ", sol.t[11], " years:") -println("Numerical: ", sol[11]) -println("Analytic: ", u[11]) -``` - -Both the value of the numerical solution and its uncertainty match the analytic solution within the requested tolerance. We can also note that close to 5730 years after the beginning of the decay (half-life of the radioisotope), the fraction of carbon-14 that survived is about 0.5. - -## Simple pendulum - -### Small angles approximation - -The next problem we are going to study is the simple pendulum in the approximation of small angles. 
We address this simplified case because there exists an easy analytic solution to compare. - -The differential equation we want to solve is - -$$\ddot{\theta} + \frac{g}{L} \theta = 0$$ - -where $g = (9.79 \pm 0.02)~\mathrm{m}/\mathrm{s}^2$ is the gravitational acceleration measured where the experiment is carried out, and $L = (1.00 \pm 0.01)~\mathrm{m}$ is the length of the pendulum. - -When you set up the problem for `DifferentialEquations.jl` remember to define the measurements as variables, as seen above. - -```julia -using DifferentialEquations, Measurements, Plots - -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L)*θ -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -# Analytic solution -u = u₀[2] .* cos.(sqrt(g / L) .* sol.t) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -plot!(sol.t, u, label = "Analytic") -``` - -Also in this case there is a perfect superimposition between the two curves, including their uncertainties. - -We can also have a look at the difference between the two solutions: - -```julia -plot(sol.t, getindex.(sol.u, 2) .- u, label = "") -``` - -## Arbitrary amplitude - -Now that we know how to solve differential equations involving numbers with uncertainties we can solve the simple pendulum problem without any approximation. 
This time the differential equation to solve is the following: - -$$\ddot{\theta} + \frac{g}{L} \sin(\theta) = 0$$ - -```julia -g = 9.79 ± 0.02; # Gravitational constants -L = 1.00 ± 0.01; # Length of the pendulum - -#Initial Conditions -u₀ = [0 ± 0, π / 3 ± 0.02] # Initial speed and initial angle -tspan = (0.0, 6.3) - -#Define the problem -function simplependulum(du,u,p,t) - θ = u[1] - dθ = u[2] - du[1] = dθ - du[2] = -(g/L) * sin(θ) -end - -#Pass to solvers -prob = ODEProblem(simplependulum, u₀, tspan) -sol = solve(prob, Tsit5(), reltol = 1e-6) - -plot(sol.t, getindex.(sol.u, 2), label = "Numerical") -``` - -We note that in this case the period of the oscillations is not constant. - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/03-unitful.jmd b/tutorials/type_handling/03-unitful.jmd deleted file mode 100644 index b47f9cce..00000000 --- a/tutorials/type_handling/03-unitful.jmd +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Unit Checked Arithmetic via Unitful.jl -author: Chris Rackauckas ---- - -Units and dimensional analysis are standard tools across the sciences for checking the correctness of your equation. However, most ODE solvers only allow for the equation to be in dimensionless form, leaving it up to the user to both convert the equation to a dimensionless form, punch in the equations, and hopefully not make an error along the way. - -DifferentialEquations.jl allows for one to use Unitful.jl to have unit-checked arithmetic natively in the solvers. Given the dispatch implementation of the Unitful, this has little overhead. - -## Using Unitful - -To use Unitful, you need to have the package installed. Then you can add units to your variables. For example: - -```julia; wrap=false -using Unitful -t = 1.0u"s" -``` - -Notice that `t` is a variable with units in seconds. 
If we make another value with seconds, they can add - -```julia; wrap=false -t2 = 1.02u"s" -t+t2 -``` - -and they can multiply: - -```julia; wrap=false -t*t2 -``` - -You can even do rational roots: - -```julia; wrap=false -sqrt(t) -``` - -Many operations work. These operations will check to make sure units are correct, and will throw an error for incorrect operations: - -```julia; wrap=false -t + sqrt(t) -``` - -## Using Unitful with DifferentialEquations.jl - -Just like with other number systems, you can choose the units for your numbers by simply specifying the units of the initial condition and the timestep. For example, to solve the linear ODE where the variable has units of Newton's and `t` is in Seconds, we would use: - -```julia; wrap=false -using DifferentialEquations -f = (y,p,t) -> 0.5*y -u0 = 1.5u"N" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) -``` - -Notice that we recieved a unit mismatch error. This is correctly so! Remember that for an ODE: - -$$\frac{dy}{dt} = f(t,y)$$ - -we must have that `f` is a rate, i.e. `f` is a change in `y` per unit time. So we need to fix the units of `f` in our example to be `N/s`. Notice that we then do not receive an error if we do the following: - -```julia; wrap=false -f = (y,p,t) -> 0.5*y/3.0u"s" -prob = ODEProblem(f,u0,(0.0u"s",1.0u"s")) -sol = solve(prob,Tsit5()) -``` - -This gives a a normal solution object. 
Notice that the values are all with the correct units: - -```julia; wrap=false -print(sol[:]) -``` - -We can plot the solution by removing the units: - -```julia; wrap=false -using Plots -gr() -plot(ustrip(sol.t),ustrip(sol[:]),lw=3) -``` - -```julia, echo = false, skip="notebook" -using SciMLTutorials -SciMLTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) -``` diff --git a/tutorials/type_handling/Project.toml b/tutorials/type_handling/Project.toml deleted file mode 100644 index 3653d082..00000000 --- a/tutorials/type_handling/Project.toml +++ /dev/null @@ -1,22 +0,0 @@ -[deps] -ArbNumerics = "7e558dbc-694d-5a72-987c-6f4ebed21442" -DecFP = "55939f99-70c6-5e9b-8bb0-5071ed7d61fd" -Decimals = "abce61dc-4473-55a0-ba07-351d65e31d42" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" -DoubleFloats = "497a8b3b-efae-58df-a0af-a86822472b78" -Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" -OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" -SciMLTutorials = "30cb0354-2223-46a9-baa0-41bdcfbe0178" - -[compat] -ArbNumerics = "1.0" -DecFP = "0.4, 1.0" -Decimals = "0.4" -DifferentialEquations = "6.8" -DoubleFloats = "0.9, 1.0" -Measurements = "2.1" -OrdinaryDiffEq = "5.23" -Plots = "0.27, 0.28, 0.29, 1.0" -Unitful = "1" diff --git a/weave_tutorials.jl b/weave_tutorials.jl index 0680c811..a6052620 100644 --- a/weave_tutorials.jl +++ b/weave_tutorials.jl @@ -7,7 +7,7 @@ if isdir(target) println("Weaving the $(target) folder") SciMLTutorials.weave_folder(target) elseif isfile(target) - folder = dirname(target) + folder = dirname(target)[11:end] # remove the tutorials/ file = basename(target) println("Weaving $(folder)/$(file)") SciMLTutorials.weave_file(folder, file)