diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml new file mode 100644 index 00000000..d1162ce1 --- /dev/null +++ b/.github/workflows/CompatHelper.yml @@ -0,0 +1,26 @@ +name: CompatHelper + +on: + schedule: + - cron: '00 * * * *' + issues: + types: [opened, reopened] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + julia-version: [1.2.0] + julia-arch: [x86] + os: [ubuntu-latest] + steps: + - uses: julia-actions/setup-julia@latest + with: + version: ${{ matrix.julia-version }} + - name: Pkg.add("CompatHelper") + run: julia -e 'using Pkg; Pkg.add("CompatHelper")' + - name: CompatHelper.main() + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: julia -e 'using CompatHelper; CompatHelper.main()' \ No newline at end of file diff --git a/CITATION.bib b/CITATION.bib index 33c2d20f..c5ed29ff 100644 --- a/CITATION.bib +++ b/CITATION.bib @@ -1,7 +1,7 @@ @article{DifferentialEquations.jl-2017, author = {Rackauckas, Christopher and Nie, Qing}, doi = {10.5334/jors.151}, - journal = {The Journal of Open Source Software}, + journal = {The Journal of Open Research Software}, keywords = {Applied Mathematics}, note = {Exported from https://app.dimensions.ai on 2019/05/05}, number = {1}, diff --git a/Project.toml b/Project.toml index 2262d36a..7e21deaf 100644 --- a/Project.toml +++ b/Project.toml @@ -1,9 +1,10 @@ name = "DiffEqTutorials" uuid = "6d1b261a-3be8-11e9-3f2f-0b112a9a8436" authors = ["Chris Rackauckas "] -version = "0.1.0" +version = "0.2.0" [deps] +AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" ArbNumerics = "7e558dbc-694d-5a72-987c-6f4ebed21442" BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" CUDAnative = "be33ccc6-a3ff-5ff2-a52e-74243cff1e17" @@ -15,6 +16,7 @@ DiffEqBayes = "ebbdde9d-f333-5424-9be2-dbf1e9acfb5e" DiffEqBiological = "eb300fae-53e8-50a0-950c-e21f52c2b7e0" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" DiffEqDevTools = "f3b72e0c-5b89-59e1-b016-84e28bfd966d" +DiffEqOperators = "9fdde737-9c7f-55bf-ade8-46b3f136cc48" DiffEqParamEstim = "1130ab10-4a5a-5621-a13d-e4788d82bd4c" DiffEqPhysics = "055956cb-9e8b-5191-98cc-73ae4a59e68a" DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" @@ -35,6 +37,8 @@ Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" +SparseDiffTools = "47a9eef4-7e08-11e9-0b38-333d64bd3804" +SparsityDetection = "684fba80-ace3-11e9-3d08-3bc7ed6f96df" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" @@ -42,4 +46,41 @@ Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" Weave = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" [compat] +AlgebraicMultigrid = "0.2" +ArbNumerics = "1.0" +BenchmarkTools = "0.4" +CUDAnative = "2.5" +Cairo = "0.8, 1.0" +CuArrays = "1.4" +DecFP = "0.4" +Decimals = "0.4" +DiffEqBayes = "2.1" +DiffEqBiological = "4.0" +DiffEqCallbacks = "2.9" +DiffEqDevTools = "2.15" +DiffEqOperators = "4.3" +DiffEqParamEstim = "1.8" +DiffEqPhysics = "3.2" +DifferentialEquations = "6.8" +Distributions = "0.21" +DoubleFloats = "0.9, 1.0" +ForwardDiff = "0.10" +IJulia = "1.20" +Latexify = "0.12" +Measurements = "2.1" +ModelingToolkit = "0.9, 0.10, 1.0" +NLsolve = "4.2" +Optim = "0.19" +OrdinaryDiffEq = "5.23" +ParameterizedFunctions = "4.2" +Plots = "0.27, 0.28" +PyPlot = "2.8" +RecursiveArrayTools = "1.0" +SparseDiffTools = 
"0.10, 1.0" +SparsityDetection = "0.1" +StaticArrays = "0.10, 0.11, 0.12" +StatsPlots = "0.12, 0.13" +Sundials = "3.8" +Unitful = "0.17, 0.18" +Weave = "0.9" julia = "1" diff --git a/README.md b/README.md index a015aceb..8217f58d 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,8 @@ DiffEqTutorials.jl holds PDFs, webpages, and interactive Jupyter notebooks showing how to utilize the software in the JuliaDiffEq ecosystem. This set of tutorials was made to complement the -[documentation](http://docs.juliadiffeq.org/latest/) and the -[devdocs](http://devdocs.juliadiffeq.org/latest/) +[documentation](http://docs.juliadiffeq.org/dev/) and the +[devdocs](http://devdocs.juliadiffeq.org/dev/) by providing practical examples of the concepts. For more details, please consult the docs. @@ -42,6 +42,7 @@ DiffEqTutorials.open_notebooks() - [Conditional Dosing Example](http://tutorials.juliadiffeq.org/html/models/02-conditional_dosing.html) - [DiffEqBiological Tutorial I: Introduction](http://tutorials.juliadiffeq.org/html/models/03-diffeqbio_I_introduction.html) - [DiffEqBiological Tutorial II: Network Properties API](http://tutorials.juliadiffeq.org/html/models/04-diffeqbio_II_networkproperties.html) + - [DiffEqBiological Tutorial III: Steady-States and Bifurcations](http://tutorials.juliadiffeq.org/html/models/04b-diffeqbio_III_steadystates.html) - [Kepler Problem Orbit](http://tutorials.juliadiffeq.org/html/models/05-kepler_problem.html) - [Bayesian Inference of Pendulum Parameters](http://tutorials.juliadiffeq.org/html/models/06-pendulum_bayesian_inference.html) - Advanced ODE Features @@ -55,6 +56,7 @@ DiffEqTutorials.open_notebooks() - [Unit Check Arithmetic via Unitful.jl](http://tutorials.juliadiffeq.org/html/type_handling/03-unitful.html) - Advanced - [A 2D Cardiac Electrophysiology Model (CUDA-accelerated PDE solver)](http://tutorials.juliadiffeq.org/html/advanced/01-beeler_reuter.html) + - [Solving Stiff Equations](http://tutorials.juliadiffeq.org/html/advanced/02-advanced_ODE_solving.html) ## Contributing diff --git a/REQUIRE b/REQUIRE deleted file mode 100644 index e9e3d679..00000000 --- a/REQUIRE +++ /dev/null @@ -1,2 +0,0 @@ -Weave -IJulia diff --git a/html/advanced/02-advanced_ODE_solving.html b/html/advanced/02-advanced_ODE_solving.html new file mode 100644 index 00000000..5e86d7f5 --- /dev/null +++ b/html/advanced/02-advanced_ODE_solving.html @@ -0,0 +1,1614 @@ + + + + + + Solving Stiff Equations + + + + + + + + + + + + + + + + + +
+
+
+ +
+

Solving Stiff Equations

+
Chris Rackauckas
+ +
+ +

This tutorial is for getting into the extra features for solving stiff ordinary differential equations in an efficient manner. Solving stiff ordinary differential equations requires specializing the linear solver on properties of the Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2) back-solves. Note that these same functions and controls also extend to stiff SDEs, DDEs, DAEs, etc.

+

Code Optimization for Differential Equations

+

Writing Efficient Code

+

For a detailed tutorial on how to optimize one's DifferentialEquations.jl code, please see the Optimizing DiffEq Code tutorial.

+

Choosing a Good Solver

+

Choosing a good solver is required for getting top-notch speed. General recommendations can be found on the solver page (for example, the ODE Solver Recommendations). The current recommendations can be simplified to a Rosenbrock method (Rosenbrock23 or Rodas5) for smaller (<50 ODEs) problems, ESDIRK methods for slightly larger problems (TRBDF2 or KenCarp4 for <2000 ODEs), and Sundials CVODE_BDF for even larger problems. lsoda from LSODA.jl is generally worth a try.
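The solver is simply the second argument to solve, so trying each of these recommendations is a one-line change. A minimal sketch (assuming prob is a stiff ODEProblem defined elsewhere, such as the Robertson problem constructed later in this tutorial):

using DifferentialEquations, Sundials
# Each call differs only in the solver choice; `prob` is assumed to be a
# stiff ODEProblem defined elsewhere (e.g. the Robertson problem below).
sol_small  = solve(prob, Rosenbrock23())  # small systems (<50 ODEs)
sol_medium = solve(prob, TRBDF2())        # medium systems (<2000 ODEs)
sol_large  = solve(prob, CVODE_BDF())     # large systems (Sundials)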

+

More details on the solver to choose can be found by benchmarking. See the DiffEqBenchmarks to compare many solvers on many problems.

+

Check Out the Speed FAQ

+

See this FAQ for information on common pitfalls and how to improve performance.

+

Setting Up Your Julia Installation for Speed

+

Julia uses an underlying BLAS implementation for its matrix multiplications and factorizations. This library is automatically multithreaded and accelerates the internal linear algebra of DifferentialEquations.jl. However, for optimality, you should make sure that the number of BLAS threads that you are using matches the number of physical cores and not the number of logical cores. See this issue for more details.

+

To check the number of BLAS threads, use:

+ + +
+ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ())
+
+ + +
+8
+
+ + +

If I want to set this directly to 4 threads, I would use:

+ + +
+using LinearAlgebra
+LinearAlgebra.BLAS.set_num_threads(4)
+
+ + + +

Additionally, in some cases Intel's MKL might be a faster BLAS than the standard BLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you can use MKL.jl which will accelerate the linear algebra routines. Please see the package for the limitations.
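To verify which BLAS your session is actually linked against, you can query the vendor; this is a minimal check (it reports :openblas on a stock Julia install and should report :mkl once MKL has been built in):

using LinearAlgebra
# Report the BLAS vendor the running Julia session is linked against.
LinearAlgebra.BLAS.vendor()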

+

Use Accelerator Hardware

+

When possible, use GPUs. If your ODE system is small and you need to solve it with very many different parameters, see the ensembles interface and DiffEqGPU.jl. If your problem is large, consider using a CuArray for the state to allow for GPU-parallelism of the internal linear algebra.
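As a rough sketch of the CuArray route (this assumes a CUDA-capable GPU with CuArrays.jl installed; the decay! right-hand side below is purely illustrative and not one of this tutorial's problems), converting the initial condition to a CuArray is enough for the solver's internal broadcasts and linear algebra to run on the GPU:

using DifferentialEquations, CuArrays
# Illustrative right-hand side: elementwise decay written with broadcasting,
# so the same code works for CPU Arrays and for CuArrays.
decay!(du, u, p, t) = (du .= -0.1f0 .* u; nothing)
u0_gpu = cu(rand(Float32, 1024))                 # state lives on the GPU
prob_gpu = ODEProblem(decay!, u0_gpu, (0.0f0, 1.0f0))
solve(prob_gpu, Tsit5(), save_everystep=false)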

+

Speeding Up Jacobian Calculations

+

When one is using an implicit or semi-implicit differential equation solver, the Jacobian must be built at many iterations, and this can be one of the most expensive steps. There are two pieces that must be optimized in order to reach maximal efficiency when solving stiff equations: the sparsity pattern and the construction of the Jacobian. The construction is filling the matrix J with values, while the sparsity pattern determines which structure of J to use.

+

The sparsity pattern is given by a prototype matrix, the jac_prototype, which will be copied to be used as J. The default is for J to be a Matrix, i.e. a dense matrix. However, if you know the sparsity of your problem, then you can pass a different matrix type. For example, a SparseMatrixCSC will give a sparse matrix. Additionally, structured matrix types like Tridiagonal, BandedMatrix (from BandedMatrices.jl), BlockBandedMatrix (from BlockBandedMatrices.jl), and more can be given. DifferentialEquations.jl will internally use this matrix type, making the factorizations faster by utilizing the specialized forms.

+

For the construction, there are 3 ways to fill J:

+
  • The default, which uses normal finite/automatic differentiation
  • A function jac(J,u,p,t) which directly computes the values of J
  • A colorvec which defines a sparse differentiation scheme

We will now showcase how to make use of this functionality with growing complexity.

+

Declaring Jacobian Functions

+

Let's solve the Robertson equations:

+

\[
\begin{align}
dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\
dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3 \cdot 10^7 y_2^2 \\
dy_3 &= 3 \cdot 10^7 y_2^2
\end{align}
\]

+

In order to reduce the Jacobian construction cost, one can describe a Jacobian function by using the jac argument for the ODEFunction. First, let's do a standard ODEProblem:

+ + +
+using DifferentialEquations
+function rober(du,u,p,t)
+  y₁,y₂,y₃ = u
+  k₁,k₂,k₃ = p
+  du[1] = -k₁*y₁+k₃*y₂*y₃
+  du[2] =  k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
+  du[3] =  k₂*y₂^2
+  nothing
+end
+prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
+sol = solve(prob,Rosenbrock23())
+
+using Plots
+plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))
+
+ + + + + +
+using BenchmarkTools
+@btime solve(prob)
+
+ + +
+409.600 μs (3063 allocations: 161.83 KiB)
+retcode: Success
+Interpolation: Automatic order switching interpolation
+t: 115-element Array{Float64,1}:
+      0.0                  
+      0.0014148468219250373
+      0.0020449182545311173
+      0.0031082402716566307
+      0.004077787050059496 
+      0.005515332443361059 
+      0.007190040962774541 
+      0.009125372578778032 
+      0.011053912492732977 
+      0.012779077276958607 
+      ⋮                    
+  47335.56357690261        
+  52732.01292853374        
+  58693.72991412389        
+  65278.000210850696       
+  72548.20206513454        
+  80574.5643369749         
+  89435.05301092885        
+  99216.41264599326        
+ 100000.0                  
+u: 115-element Array{Array{Float64,1},1}:
+ [1.0, 0.0, 0.0]                                                    
+ [0.9999434113193613, 3.283958829839966e-5, 2.3749092340286502e-5]  
+ [0.9999182177783585, 3.55426801363446e-5, 4.6239541505020656e-5]   
+ [0.999875715036629, 3.6302469334849744e-5, 8.798249403609506e-5]   
+ [0.9998369766077329, 3.646280308115459e-5, 0.00012656058918590176] 
+ [0.9997795672444667, 3.646643085642237e-5, 0.0001839663246768369]  
+ [0.9997127287139348, 3.6447279992896e-5, 0.00025082400607228316]   
+ [0.9996355450022019, 3.6366816179962866e-5, 0.00032808818161818775]
+ [0.9995586925734838, 3.6018927453312764e-5, 0.00040528849906290045]
+ [0.9994899965196854, 3.468694637786026e-5, 0.000475316533936808]   
+ ⋮                                                                  
+ [0.03394368168613229, 1.404798439362035e-7, 0.9660561778340258]    
+ [0.031028975539652698, 1.280360743781007e-7, 0.9689708964242754]   
+ [0.02835436357223889, 1.1668209524677941e-7, 0.9716455197456683]   
+ [0.025901326001934923, 1.0632276689411095e-7, 0.9740985676753005]  
+ [0.023652545345805354, 9.687112514942483e-8, 0.9763473577830714]   
+ [0.021591862129552664, 8.824767963573306e-8, 0.9784080496227692]   
+ [0.019704225538717677, 8.037977048382674e-8, 0.9802956940815135]   
+ [0.017975641463053707, 7.320098240041474e-8, 0.9820242853359655]   
+ [0.017850566233695766, 7.268384360678819e-8, 0.9821493610824623]
+
+ + +

Now we want to add the Jacobian. First we have to derive the Jacobian $\frac{df_i}{du_j}$ which is J[i,j]. From this we get:

+ + +
+function rober_jac(J,u,p,t)
+  y₁,y₂,y₃ = u
+  k₁,k₂,k₃ = p
+  J[1,1] = k₁ * -1
+  J[2,1] = k₁
+  J[3,1] = 0
+  J[1,2] = y₃ * k₃
+  J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1
+  J[3,2] = y₂ * 2 * k₂
+  J[1,3] = k₃ * y₂
+  J[2,3] = k₃ * y₂ * -1
+  J[3,3] = 0
+  nothing
+end
+f = ODEFunction(rober, jac=rober_jac)
+prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
+
+@btime solve(prob_jac)
+
+ + +
+317.900 μs (2599 allocations: 153.11 KiB)
+retcode: Success
+Interpolation: Automatic order switching interpolation
+t: 115-element Array{Float64,1}:
+      0.0                  
+      0.0014148468219250373
+      0.0020449182545311173
+      0.0031082402716566307
+      0.004077787050059496 
+      0.005515332443361059 
+      0.007190040962774541 
+      0.009125372578778032 
+      0.011053912492732977 
+      0.012779077276958607 
+      ⋮                    
+  45964.060340548356       
+  51219.40381376205        
+  57025.01899700374        
+  63436.021374561584       
+  70513.1073617524         
+  78323.14229130604        
+  86939.82338876331        
+  96444.41085674686        
+ 100000.0                  
+u: 115-element Array{Array{Float64,1},1}:
+ [1.0, 0.0, 0.0]                                                    
+ [0.9999434113193613, 3.283958829839966e-5, 2.3749092340286502e-5]  
+ [0.9999182177783585, 3.55426801363446e-5, 4.6239541505020656e-5]   
+ [0.999875715036629, 3.6302469334849744e-5, 8.798249403609506e-5]   
+ [0.9998369766077329, 3.646280308115459e-5, 0.00012656058918590176] 
+ [0.9997795672444667, 3.646643085642237e-5, 0.0001839663246768369]  
+ [0.9997127287139348, 3.6447279992896e-5, 0.00025082400607228316]   
+ [0.9996355450022019, 3.6366816179962866e-5, 0.00032808818161818775]
+ [0.9995586925734838, 3.6018927453312764e-5, 0.00040528849906290045]
+ [0.9994899965196854, 3.468694637786026e-5, 0.000475316533936808]   
+ ⋮                                                                  
+ [0.03478048133177493, 1.4406682005231008e-7, 0.9652193746014031]   
+ [0.03179591062189176, 1.313038656880417e-7, 0.9682039580742408]    
+ [0.029057356622057315, 1.1966100432939363e-7, 0.9709425237169371]  
+ [0.02654597011713668, 1.0904070990251299e-7, 0.9734539208421517]   
+ [0.024244118287194777, 9.935385522693504e-8, 0.9757557823589477]   
+ [0.022135344621501105, 9.05190025093182e-8, 0.9778645648594945]    
+ [0.02020432071854, 8.246174295748071e-8, 0.9797955968197154]       
+ [0.018436796681356796, 7.511410189106845e-8, 0.9815631282045397]   
+ [0.01785426048218692, 7.269900678199638e-8, 0.9821456668188047]
+
+ + +

Automatic Derivation of Jacobian Functions

+

But that was hard! If you want the symbolic Jacobian of numerical code, you can make use of ModelingToolkit.jl to trace the numerical code into a symbolic form, perform the symbolic differentiation, and return the generated Julia code.

+ + +
+using ModelingToolkit
+de = modelingtoolkitize(prob)
+ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place
+
+ + +
+:((##MTIIPVar#392, u, p, t)->begin
+          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils
+.jl:65 =#
+          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils
+.jl:66 =#
+          let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3
+])
+              ##MTIIPVar#392[1] = α₁ * -1
+              ##MTIIPVar#392[2] = α₁
+              ##MTIIPVar#392[3] = 0
+              ##MTIIPVar#392[4] = x₃ * α₃
+              ##MTIIPVar#392[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1
+              ##MTIIPVar#392[6] = x₂ * 2 * α₂
+              ##MTIIPVar#392[7] = α₃ * x₂
+              ##MTIIPVar#392[8] = α₃ * x₂ * -1
+              ##MTIIPVar#392[9] = 0
+          end
+          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils
+.jl:67 =#
+          nothing
+      end)
+
+ + +

which outputs:

+ + + +
+:((##MTIIPVar#376, u, p, t)->begin
+          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =#
+          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =#
+          let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3])
+              ##MTIIPVar#376[1] = α₁ * -1
+              ##MTIIPVar#376[2] = α₁
+              ##MTIIPVar#376[3] = 0
+              ##MTIIPVar#376[4] = x₃ * α₃
+              ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1
+              ##MTIIPVar#376[6] = x₂ * 2 * α₂
+              ##MTIIPVar#376[7] = α₃ * x₂
+              ##MTIIPVar#376[8] = α₃ * x₂ * -1
+              ##MTIIPVar#376[9] = 0
+          end
+          #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =#
+          nothing
+      end)
+
+ + +

Now let's use that generated code to supply the analytical Jacobian:

+ + +
+jac = eval(ModelingToolkit.generate_jacobian(de...)[2])
+f = ODEFunction(rober, jac=jac)
+prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
+
+ + +
+ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
+timespan: (0.0, 100000.0)
+u0: [1.0, 0.0, 0.0]
+
+ + +

Declaring a Sparse Jacobian

+

Jacobian sparsity is declared by the jac_prototype argument in the ODEFunction. Note that you should only do this if the sparsity is high (for example, only 0.1% of the matrix is non-zero); otherwise the overhead of sparse matrices can be higher than the gains from sparse differentiation!

+

But as a demonstration, let's build a sparse matrix for the Rober problem. We can do this by gathering the I and J pairs for the non-zero components, like:

+ + +
+I = [1,2,1,2,3,1,2]
+J = [1,1,2,2,2,3,3]
+using SparseArrays
+jac_prototype = sparse(I,J,1.0)
+
+ + +
+3×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 7 stored entries:
+  [1, 1]  =  1.0
+  [2, 1]  =  1.0
+  [1, 2]  =  1.0
+  [2, 2]  =  1.0
+  [3, 2]  =  1.0
+  [1, 3]  =  1.0
+  [2, 3]  =  1.0
+
+ + +

Now this is the sparse matrix prototype that we want to use in our solver, which we then pass like:

+ + +
+f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype)
+prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
+
+ + +
+ODEProblem with uType Array{Float64,1} and tType Float64. In-place: true
+timespan: (0.0, 100000.0)
+u0: [1.0, 0.0, 0.0]
+
+ + +

Automatic Sparsity Detection

+

One of the useful companion tools for DifferentialEquations.jl is SparsityDetection.jl. This allows for the automatic detection and declaration of Jacobian sparsity patterns. To see this in action, let's look at the 2-dimensional Brusselator equation:

+ + +
+const N = 32
+const xyd_brusselator = range(0,stop=1,length=N)
+brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5.
+limit(a, N) = a == N+1 ? 1 : a == 0 ? N : a
+function brusselator_2d_loop(du, u, p, t)
+  A, B, alpha, dx = p
+  alpha = alpha/dx^2
+  @inbounds for I in CartesianIndices((N, N))
+    i, j = Tuple(I)
+    x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]]
+    ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N)
+    du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) +
+                B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t)
+    du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) +
+                A*u[i,j,1] - u[i,j,1]^2*u[i,j,2]
+    end
+end
+p = (3.4, 1., 10., step(xyd_brusselator))
+
+ + +
+(3.4, 1.0, 10.0, 0.03225806451612903)
+
+ + +

Given this setup, we can supply an example input and output and call sparsity! on our function with those example arguments; it will return a sparse matrix with our sparsity pattern, which we can turn into our jac_prototype.

+ + +
+using SparsityDetection, SparseArrays
+input = rand(32,32,2)
+output = similar(input)
+sparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0)
+
+ + +
+Explored path: SparsityDetection.Path(Bool[], 1)
+
+ + + +
+jac_sparsity = Float64.(sparse(sparsity_pattern))
+
+ + +
+2048×2048 SparseArrays.SparseMatrixCSC{Float64,Int64} with 12288 stored ent
+ries:
+  [1   ,    1]  =  1.0
+  [2   ,    1]  =  1.0
+  [32  ,    1]  =  1.0
+  [33  ,    1]  =  1.0
+  [993 ,    1]  =  1.0
+  [1025,    1]  =  1.0
+  [1   ,    2]  =  1.0
+  [2   ,    2]  =  1.0
+  [3   ,    2]  =  1.0
+  ⋮
+  [2015, 2047]  =  1.0
+  [2046, 2047]  =  1.0
+  [2047, 2047]  =  1.0
+  [2048, 2047]  =  1.0
+  [1024, 2048]  =  1.0
+  [1056, 2048]  =  1.0
+  [2016, 2048]  =  1.0
+  [2017, 2048]  =  1.0
+  [2047, 2048]  =  1.0
+  [2048, 2048]  =  1.0
+
+ + +

Let's double check what our sparsity pattern looks like:

+ + +
+using Plots
+spy(jac_sparsity,markersize=1,colorbar=false,color=:deep)
+
+ + + + +

That's neat, and would be tedious to build by hand! Now we just pass it to the ODEFunction as before:

+ + +
+f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity)
+
+ + +
+(::DiffEqBase.ODEFunction{true,typeof(Main.WeaveSandBox0.brusselator_2d_loo
+p),LinearAlgebra.UniformScaling{Bool},Nothing,Nothing,Nothing,SparseArrays.
+SparseMatrixCSC{Float64,Int64},Nothing,Nothing,Nothing,Nothing,Nothing}) (g
+eneric function with 7 methods)
+
+ + +

Build the ODEProblem:

+ + +
+function init_brusselator_2d(xyd)
+  N = length(xyd)
+  u = zeros(N, N, 2)
+  for I in CartesianIndices((N, N))
+    x = xyd[I[1]]
+    y = xyd[I[2]]
+    u[I,1] = 22*(y*(1-y))^(3/2)
+    u[I,2] = 27*(x*(1-x))^(3/2)
+  end
+  u
+end
+u0 = init_brusselator_2d(xyd_brusselator)
+prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop,
+                                     u0,(0.,11.5),p)
+
+prob_ode_brusselator_2d_sparse = ODEProblem(f,
+                                     u0,(0.,11.5),p)
+
+ + +
+ODEProblem with uType Array{Float64,3} and tType Float64. In-place: true
+timespan: (0.0, 11.5)
+u0: [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715
+876 … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371
+586 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]
+
+ + +

Now let's see how the version with sparsity compares to the version without:

+ + +
+@btime solve(prob_ode_brusselator_2d,save_everystep=false)
+
+ + +
+43.298 s (7317 allocations: 70.12 MiB)
+
+ + + +
+@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)
+
+ + +
+23.900 s (367199 allocations: 896.99 MiB)
+retcode: Success
+Interpolation: 1st order linear
+t: 2-element Array{Float64,1}:
+  0.0
+ 11.5
+u: 2-element Array{Array{Float64,3},1}:
+ [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
+ … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
+ 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
+                                                                           
+                                                                           
+                                                 
+ [3.2183315970074036 3.2183043434767553 … 3.2184226343677738 3.218371247341
+7185; 3.2183804713733872 3.2183499447177057 … 3.2184831183646856 3.21842504
+7282479; … ; 3.218246108233481 3.2182241729222354 … 3.2183185170391946 3.21
+82778079052787; 3.2182863194790094 3.218261945024488 … 3.218367227674788 3.
+218321653767132]
+
+[2.364108254063361 2.364109732940303 … 2.364103502720394 2.3641061660225517
+; 2.364105345047017 2.3641069231419443 … 2.3641002347797833 2.3641031002634
+882; … ; 2.364113451334332 2.3641147252834216 … 2.364109297958111 2.3641116
+159339757; 2.3641109923384915 2.364112358364487 … 2.3641065653101885 2.3641
+090439583214]
+
+ + +

Declaring Color Vectors for Fast Construction

+

If you cannot directly define a Jacobian function, you can use the colorvec to speed up the Jacobian construction. What the colorvec does is allow multiple columns of the Jacobian to be calculated simultaneously by exploiting the sparsity pattern. An explanation of matrix coloring can be found in the MIT 18.337 Lecture Notes.

+

To perform general matrix coloring, we can use SparseDiffTools.jl. For example, for the Brusselator equation:

+ + +
+using SparseDiffTools
+colorvec = matrix_colors(jac_sparsity)
+@show maximum(colorvec)
+
+ + +
+maximum(colorvec) = 12
+12
+
+ + +

This means that we can now calculate the Jacobian in 12 function calls, a nice reduction from 2048 using only automated tooling! To make use of this inside the ODE solver, you simply need to declare the colorvec:

+ + +
+f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity,
+                                    colorvec=colorvec)
+prob_ode_brusselator_2d_sparse = ODEProblem(f,
+                                     init_brusselator_2d(xyd_brusselator),
+                                     (0.,11.5),p)
+@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)
+
+ + +
+5.184 s (19039 allocations: 881.07 MiB)
+retcode: Success
+Interpolation: 1st order linear
+t: 2-element Array{Float64,1}:
+  0.0
+ 11.5
+u: 2-element Array{Array{Float64,3},1}:
+ [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
+ … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
+ 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
+                                                                           
+                                                                           
+                                                   
+ [3.2183373918177796 3.2183101409241526 … 3.2184284167956267 3.218377034566
+1604; 3.2183862623036537 3.218355740108313 … 3.218488896905827 3.2184308308
+09056; … ; 3.218251904608678 3.2182299624517134 … 3.2183243097118095 3.2182
+835995190024; 3.2182921103674285 3.218267738694001 … 3.2183730157748163 3.2
+183274412034346]
+
+[2.3641011711912463 2.364102627665652 … 2.364096424152248 2.364099082779794
+5; 2.3640982676790627 2.3640998304296703 … 2.3640931617281944 2.36409602465
+74303; … ; 2.364106344376436 2.3641076180295504 … 2.364102206048339 2.36410
+45205022344; 2.364103899515714 2.3641052552245445 … 2.3640994754056486 2.36
+41019485955153]
+
+ + +

Notice the massive speed enhancement!

+

Defining Linear Solver Routines and Jacobian-Free Newton-Krylov

+

A completely different way to optimize the linear solvers for large sparse matrices is to use a Krylov subspace method. This requires swapping the linear solver out for a Krylov method. Optionally, one can use a Jacobian-free operator to reduce the memory requirements.

+

Declaring a Jacobian-Free Newton-Krylov Implementation

+

To swap the linear solver out, we use the linsolve keyword argument and choose the GMRES linear solver.

+ + +
+@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
+
+ + +
+236.859 s (1266049 allocations: 120.80 MiB)
+
+ + + +
+@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
+
+ + +
+4.175 s (1327264 allocations: 59.92 MiB)
+retcode: Success
+Interpolation: 1st order linear
+t: 2-element Array{Float64,1}:
+  0.0
+ 11.5
+u: 2-element Array{Array{Float64,3},1}:
+ [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
+ … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
+ 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
+                                                                           
+                                                                           
+                                              
+ [2.8494040430340677 2.849376568123844 … 2.849495874352271 2.84944397101885
+77; 2.8494535304517883 2.8494226751421077 … 2.849557062218097 2.84949828863
+504; … ; 2.8493164846505232 2.849294110741412 … 2.8493903195873105 2.849349
+0728548774; 2.849357928360968 2.84933329062441 … 2.8494396335090886 2.84939
+36648688254]
+
+[2.8157264541468283 2.8157283534566693 … 2.8157208829524296 2.8157236606184
+397; 2.8157225956336194 2.815724834275517 … 2.815716958084277 2.81571990149
+71726; … ; 2.815734632998308 2.8157368388547357 … 2.8157282527277308 2.8157
+31663143054; 2.815730494353417 2.815732379564653 … 2.8157247313047327 2.815
+7277764523414]
+
+ + +

For more information on linear solver choices, see the linear solver documentation.

+

On this problem, handling the sparsity correctly seemed to give much more of a speedup than going to a Krylov approach, but that can be dependent on the problem (and whether a good preconditioner is found).

+

We can also enhance this by using a Jacobian-free implementation of f'(x)*v. To define the Jacobian-free operator, we can use DiffEqOperators.jl to generate a JacVecOperator Jv such that Jv*v computes f'(x)*v without ever building the Jacobian matrix.

+ + +
+using DiffEqOperators
+Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0)
+
+ + +
+DiffEqOperators.JacVecOperator{Float64,typeof(Main.WeaveSandBox0.brusselato
+r_2d_loop),Array{ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1},3},A
+rray{ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1},3},Array{Float64
+,3},NTuple{4,Float64},Float64,Bool}(Main.WeaveSandBox0.brusselator_2d_loop,
+ ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOperators
+.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.12134432813715876,0.
+12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213443281371586,0.1
+213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual{DiffEqOpera
+tors.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1213443281371587
+6,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213443281371586
+,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); … ; Dual{Dif
+fEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.12134432
+813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.12134432
+81371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual
+{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1213
+4432813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213
+443281371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0)]
+
+ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOperators.
+JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … Dual{DiffEqO
+perators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual
+{DiffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{D
+iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) … Dual{D
+iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{Dif
+fEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755); … ; Dual{
+DiffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Di
+ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) … Dual{Di
+ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Diff
+EqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738); Dual{DiffE
+qOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … D
+ual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0
+,0.0)], ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOp
+erators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1213443281371
+5876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1213443281371
+586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual{Diff
+EqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.121344328
+13715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.121344328
+1371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); … ; D
+ual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.1
+2134432813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}(0.1
+213443281371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0,0.0
+); Dual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}
+(0.12134432813715876,0.12134432813715876) … Dual{DiffEqOperators.JacVecTag}
+(0.1213443281371586,0.1213443281371586) Dual{DiffEqOperators.JacVecTag}(0.0
+,0.0)]
+
+ForwardDiff.Dual{DiffEqOperators.JacVecTag,Float64,1}[Dual{DiffEqOperators.
+JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … Dual{DiffEqO
+perators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0); Dual
+{DiffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{D
+iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) … Dual{D
+iffEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755) Dual{Dif
+fEqOperators.JacVecTag}(0.14892258453196755,0.14892258453196755); … ; Dual{
+DiffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Di
+ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) … Dual{Di
+ffEqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738) Dual{Diff
+EqOperators.JacVecTag}(0.14892258453196738,0.14892258453196738); Dual{DiffE
+qOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0,0.0) … D
+ual{DiffEqOperators.JacVecTag}(0.0,0.0) Dual{DiffEqOperators.JacVecTag}(0.0
+,0.0)], [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.1213443281
+3715876 … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.121344328
+1371586 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0], (3.4, 1.0, 10.0
+, 0.03225806451612903), 0.0, true, false, true)
+
+ + +

and then we can use this by making it our jac_prototype:

+ + +
+f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv)
+prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p)
+@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
+
+ + +
+3.066 s (1875298 allocations: 78.86 MiB)
+retcode: Success
+Interpolation: 1st order linear
+t: 2-element Array{Float64,1}:
+  0.0
+ 11.5
+u: 2-element Array{Array{Float64,3},1}:
+ [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
+ … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
+ 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
+                                                                           
+                                                                           
+                                            
+ [2.7872216645408567 2.787194432792592 … 2.78731308303355 2.787261467045361
+5; 2.787271339364228 2.787240720815802 … 2.787374377179153 2.78731605405600
+74; … ; 2.787134161549321 2.7871118187949984 … 2.7872072238860723 2.7871659
+77632712; 2.7871755020101205 2.7871508886342986 … 2.7872566948955084 2.7872
+10735234632]
+
+[2.8988126677437585 2.8988142936416157 … 2.8988075464551772 2.8988105556623
+86; 2.898808902249186 2.8988104514436563 … 2.898803969323616 2.898806883740
+06; … ; 2.898820028584711 2.898821666296394 … 2.898814592161897 2.898817604
+8750383; 2.8988163685403467 2.8988181996160387 … 2.8988111330962316 2.89881
+40808038274]
+
+ + +

Adding a Preconditioner

+

The linear solver documentation shows how you can add a preconditioner to GMRES. For example, you can use packages like AlgebraicMultigrid.jl to add an algebraic multigrid (AMG) preconditioner or IncompleteLU.jl for an incomplete LU factorization (iLU).

+ + +
+using AlgebraicMultigrid
+pc = aspreconditioner(ruge_stuben(jac_sparsity))
+@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false)
+
+ + +
+2.456 s (233048 allocations: 139.27 MiB)
+retcode: Success
+Interpolation: 1st order linear
+t: 2-element Array{Float64,1}:
+  0.0
+ 11.5
+u: 2-element Array{Array{Float64,3},1}:
+ [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
+ … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
+ 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
+                                                                           
+                                                                           
+                                                             
+ [3.5273952159283844e10 -1.4265682748702106e10 … 9234.374594756042 13421.86
+8437681665; -7.091075675799031e9 6.51451873695435e9 … 9234.400545337947 134
+21.868410996974; … ; 13421.868025883945 9234.400562276434 … 9234.4001922958
+72 13421.86842409367; 13421.868438369747 9234.37496117112 … 9234.3749749346
+17 13421.868424050659]
+
+[66730.63093229767 -115820.52698935539 … 16462.92400611659 16458.1794290617
+3; 8.043448946694581e6 1.307043107719831e7 … 11331.237739674985 11326.51840
+7046895; … ; 11326.51842066477 11331.237738901911 … 11331.237752373656 1132
+6.518406581376; 16458.179429033426 16462.923993307235 … 16462.923992815315 
+16458.179429539887]
+
+ + +

Using Structured Matrix Types

+

If your sparsity pattern follows a specific structure, for example a banded matrix, then you can declare jac_prototype to be of that structure, and additional optimizations will come for free. Note that in this case it is not necessary to provide a colorvec, since the color vector will be analytically derived from the structure of the matrix.

+

The matrices which are allowed are those which satisfy the ArrayInterface.jl interface for automatically-colorable matrices. These include:

  • Tridiagonal
  • BandedMatrix (from BandedMatrices.jl)
  • BlockBandedMatrix (from BlockBandedMatrices.jl)

Matrices which do not satisfy this interface can still be used, but the matrix coloring will not be automatic, and an appropriate linear solver may need to be given (otherwise it will default to attempting an LU-decomposition).
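As a small sketch of how this looks in practice (the heat1d! right-hand side, its size, and the solver call below are illustrative assumptions, not taken from this tutorial), a nearest-neighbor coupling has a tridiagonal Jacobian, so a Tridiagonal prototype can be passed directly and no colorvec is needed:

using DifferentialEquations, LinearAlgebra
# Illustrative 1D nearest-neighbor (heat-equation-like) right-hand side.
function heat1d!(du, u, p, t)
  n = length(u)
  du[1] = u[2] - 2u[1]
  @inbounds for i in 2:n-1
    du[i] = u[i-1] - 2u[i] + u[i+1]
  end
  du[n] = u[n-1] - 2u[n]
  nothing
end
n = 128
# The Jacobian of heat1d! is tridiagonal, so declare that structure directly;
# the color vector is derived automatically from the Tridiagonal type.
jac_proto = Tridiagonal(ones(n-1), fill(-2.0, n), ones(n-1))
f_structured = ODEFunction(heat1d!, jac_prototype=jac_proto)
prob_structured = ODEProblem(f_structured, rand(n), (0.0, 1.0))
solve(prob_structured, TRBDF2(), save_everystep=false)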

+

Sundials-Specific Handling

+

While much of the setup makes the transition to using Sundials automatic, there are some differences between the pure-Julia implementations and the Sundials implementations which should be noted. These are all detailed in the Sundials solver documentation, but here we will highlight the main points to be aware of.

+

Defining a sparse matrix and a Jacobian for Sundials works just as it does for any other package. The core difference is in the choice of the linear solver. With Sundials, the linear solver choice is made by passing a Symbol to the linear_solver argument, chosen from a preset list. Particular choices of note are :Band for a banded matrix and :GMRES for using GMRES. If you are using Sundials, :GMRES will not require defining the JacVecOperator, and instead will always make use of a Jacobian-free Newton-Krylov method (with numerical differentiation). Thus on this problem we could do:

+ + +
+using Sundials
+# Sparse Version
+@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false)
+
+ + +
+28.133 s (51388 allocations: 3.20 MiB)
+
+ + + +
+# GMRES Version: Doesn't require any extra stuff!
+@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)
+
+ + +
+323.286 ms (61058 allocations: 3.63 MiB)
+retcode: Success
+Interpolation: 1st order linear
+t: 2-element Array{Float64,1}:
+  0.0
+ 11.5
+u: 2-element Array{Array{Float64,3},1}:
+ [0.0 0.12134432813715876 … 0.1213443281371586 0.0; 0.0 0.12134432813715876
+ … 0.1213443281371586 0.0; … ; 0.0 0.12134432813715876 … 0.1213443281371586
+ 0.0; 0.0 0.12134432813715876 … 0.1213443281371586 0.0]
+
+[0.0 0.0 … 0.0 0.0; 0.14892258453196755 0.14892258453196755 … 0.14892258453
+196755 0.14892258453196755; … ; 0.14892258453196738 0.14892258453196738 … 0
+.14892258453196738 0.14892258453196738; 0.0 0.0 … 0.0 0.0]                 
+                                                                           
+                                                                           
+                                                  
+ [0.45369441125092624 0.45367162922766396 … 0.45377307354145824 0.453728249
+24331306; 0.45372813444006976 0.45370139820263283 … 0.45382031508907966 0.4
+537681622154197; … ; 0.4536347409999057 0.4536184243336325 … 0.453690734603
+503 0.4536589378647838; 0.4536631791063342 0.4536436405637919 … 0.453729310
+5001047 0.45369169445940305]
+
+[5.023428953606044 5.023425514309876 … 5.02343972583798 5.0234337753788845;
+ 5.023442660236476 5.023439873077652 … 5.02345101637559 5.023446317614284; 
+… ; 5.023404093671991 5.023399216246354 … 5.023419229667771 5.0234107290209
+42; 5.023415926060523 5.023411776722086 … 5.02342895844194 5.02342180621704
+3]
+
+ + +

Details for setting up a preconditioner with Sundials can be found at the Sundials solver page.

+

Handling Mass Matrices

+

Instead of just defining an ODE as $u' = f(u,p,t)$, it is common to express the differential equation in a form with a mass matrix:

+

\[ +Mu' = f(u,p,t) +\]

+

where $M$ is known as the mass matrix. Let's solve the Robertson equation. At the top we wrote this equation as:

+

\[
\begin{align}
dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\
dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3 \cdot 10^7 y_2^2 \\
dy_3 &= 3 \cdot 10^7 y_2^2
\end{align}
\]

+

But we can instead write this with a conservation relation:

+

\[
\begin{align}
dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\
dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3 \cdot 10^7 y_2^2 \\
1 &= y_1 + y_2 + y_3
\end{align}
\]

+

In this form, the system is a mass matrix ODE where $M$ is singular (this is another way of writing a differential-algebraic equation (DAE)). Here, the last row of $M$ is all zeros. We can implement this form as:

+ + +
+using DifferentialEquations
+function rober(du,u,p,t)
+  y₁,y₂,y₃ = u
+  k₁,k₂,k₃ = p
+  du[1] = -k₁*y₁+k₃*y₂*y₃
+  du[2] =  k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
+  du[3] =  y₁ + y₂ + y₃ - 1
+  nothing
+end
+M = [1. 0  0
+     0  1. 0
+     0  0  0]
+f = ODEFunction(rober,mass_matrix=M)
+prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
+sol = solve(prob_mm,Rodas5())
+
+plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))
+
+ + + + +

Note that if your mass matrix is singular, i.e. your system is a DAE, then you need to make sure you choose a solver that is compatible with DAEs (such as the Rodas5 method used above).

+ + + +
+ + + +
+
+
+ + diff --git a/html/models/03-diffeqbio_I_introduction.html b/html/models/03-diffeqbio_I_introduction.html index d961bd5c..ef931196 100644 --- a/html/models/03-diffeqbio_I_introduction.html +++ b/html/models/03-diffeqbio_I_introduction.html @@ -733,19 +733,19 @@
Samuel Isaacson
-latexify(repressilator)
+latexify(repressilator, cdot=false)
 
\begin{align*} -\frac{dm_1}{dt} =& \frac{\alpha \cdot K^{n}}{K^{n} + P_3^{n}} - \delta \cdot m_1 + \gamma \\ -\frac{dm_2}{dt} =& \frac{\alpha \cdot K^{n}}{K^{n} + P_1^{n}} - \delta \cdot m_2 + \gamma \\ -\frac{dm_3}{dt} =& \frac{\alpha \cdot K^{n}}{K^{n} + P_2^{n}} - \delta \cdot m_3 + \gamma \\ -\frac{dP_1}{dt} =& \beta \cdot m_1 - \mu \cdot P_1 \\ -\frac{dP_2}{dt} =& \beta \cdot m_2 - \mu \cdot P_2 \\ -\frac{dP_3}{dt} =& \beta \cdot m_3 - \mu \cdot P_3 \\ +\frac{dm₁(t)}{dt} =& \frac{\alpha K^{n}}{K^{n} + P_3^{n}} - \delta m_1 + \gamma \\ +\frac{dm₂(t)}{dt} =& \frac{\alpha K^{n}}{K^{n} + P_1^{n}} - \delta m_2 + \gamma \\ +\frac{dm₃(t)}{dt} =& \frac{\alpha K^{n}}{K^{n} + P_2^{n}} - \delta m_3 + \gamma \\ +\frac{dP₁(t)}{dt} =& \beta m_1 - \mu P_1 \\ +\frac{dP₂(t)}{dt} =& \beta m_2 - \mu P_2 \\ +\frac{dP₃(t)}{dt} =& \beta m_3 - \mu P_3 \end{align*} @@ -821,7 +821,7 @@

Solving the ODEs:

- +

We see the well-known oscillatory behavior of the repressilator! For more on choices of ODE solvers, see the JuliaDiffEq documentation.


@@ -845,7 +845,7 @@

Stochastic Simulation Algorithms (SSAs) for Stochastic Chemical Kine - +

Here we see that oscillations remain, but become much noisier. Note that in constructing the JumpProblem we could have used any of the SSAs that are part of DiffEqJump instead of the Direct method; see the list of SSAs (i.e. constant rate jump aggregators) in the documentation.


@@ -861,7 +861,7 @@

$\tau$-leaping Methods:

- +

Chemical Langevin Equation (CLE) Stochastic Differential Equation (SDE) Models:

@@ -882,10 +882,21 @@

Chemical Langevin Equation (CLE) Stochastic Differential Equation &#

The corresponding Chemical Langevin Equation SDE is then

-

\[ -dX_t = \left(c_1 X - c_2 X + c_3 \right) dt + \left( \sqrt{c_1 X} - \sqrt{c_2 X} + \sqrt{c_3} \right)dW_t, -\]

-

where $W_t$ denotes a standard Brownian Motion. We can solve the CLE SDE model by creating an SDEProblem and solving it similar to what we did for ODEs above:

+ + +
+latexify(bdp, noise=true, cdot=false)
+
+ + + + +\begin{align*} +\mathrm{dX}\left( t \right) =& \left( c_1 X - c_2 X + c_3 \right) dt + \sqrt{\left\|c_1 X\right\|} \mathrm{dW_1}\left( t \right) - \sqrt{\left\|c_2 X\right\|} \mathrm{dW_2}\left( t \right) + \sqrt{\left\|c_3\right\|} \mathrm{dW_3}\left( t \right) +\end{align*} + + +

where each $W_i(t)$ denotes an independent Brownian Motion. We can solve the CLE SDE model by creating an SDEProblem and solving it similar to what we did for ODEs above:

@@ -899,7 +910,7 @@ 

Chemical Langevin Equation (CLE) Stochastic Differential Equation &#

- +

We again have complete freedom to select any of the StochasticDifferentialEquations.jl SDE solvers, see the documentation.


@@ -914,7 +925,7 @@

What information can be queried from the reaction_network:

-latexify(jacobianexprs(repressilator))
+latexify(jacobianexprs(repressilator), cdot=false)
 
@@ -923,9 +934,9 @@

What information can be queried from the reaction_network:

\begin{equation*} \left[ \begin{array}{cccccc} - - \delta & 0 & 0 & 0 & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot P_3^{-1 + n}}{\left( K^{n} + P_3^{n} \right)^{2}} \\ -0 & - \delta & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot P_1^{-1 + n}}{\left( K^{n} + P_1^{n} \right)^{2}} & 0 & 0 \\ -0 & 0 & - \delta & 0 & \frac{ - K^{n} \cdot n \cdot \alpha \cdot P_2^{-1 + n}}{\left( K^{n} + P_2^{n} \right)^{2}} & 0 \\ + - \delta & 0 & 0 & 0 & 0 & \frac{ - K^{n} n \alpha P_3^{-1 + n}}{\left( K^{n} + P_3^{n} \right)^{2}} \\ +0 & - \delta & 0 & \frac{ - K^{n} n \alpha P_1^{-1 + n}}{\left( K^{n} + P_1^{n} \right)^{2}} & 0 & 0 \\ +0 & 0 & - \delta & 0 & \frac{ - K^{n} n \alpha P_2^{-1 + n}}{\left( K^{n} + P_2^{n} \right)^{2}} & 0 \\ \beta & 0 & 0 & - \mu & 0 & 0 \\ 0 & \beta & 0 & 0 & - \mu & 0 \\ 0 & 0 & \beta & 0 & 0 & - \mu \\ @@ -955,58 +966,44 @@

Getting Help

Computer Information:

-
Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
+
Julia Version 1.2.0
+Commit c6da87ff4b (2019-08-20 00:03 UTC)
 Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
+  OS: macOS (x86_64-apple-darwin18.6.0)
+  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
   WORD_SIZE: 64
   LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
+  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
 

Package Information:

-
Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
-[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
+
Status `~/.julia/environments/v1.2/Project.toml`
+[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.3
+[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.19.4
+[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 6.3.4
+[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 4.0.1
+[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.2.2
+[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.5.1
 [6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
-[f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
-[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
-[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
-[90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
-[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
+[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.8.0
+[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.20.0
+[42fd0dbc-a981-5370-80f2-aaf504508153] IterativeSolvers 0.8.1
+[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.11.0
+[54ca160b-1b9f-5127-a996-1867f4bc2a2c] ODEInterface 0.4.6
+[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.3.0
+[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.17.2
+[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.13.0
+[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.27.0
+[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.91.2
+[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.2
+[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.5
+[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 2.2.0
+[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.11.2
+[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.7.0
+[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
 [b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
+[d6f4376e-aef5-505a-96c1-9c027394607a] Markdown
@@ -1015,7 +1012,7 @@

Getting Help

diff --git a/html/models/04-diffeqbio_II_networkproperties.html b/html/models/04-diffeqbio_II_networkproperties.html index fef9591f..a76cded8 100644 --- a/html/models/04-diffeqbio_II_networkproperties.html +++ b/html/models/04-diffeqbio_II_networkproperties.html @@ -1238,11 +1238,11 @@

Example of Generating a Network Programmatically

-

We are now ready to solve the problem and plot the solution. Since we have essentially generated a method of lines discretization of the diffusion equation with a discontinuous initial condition, we'll use an A-L stable implicit ODE solver, KenCarp4, and plot the solution at a few times:

+

We are now ready to solve the problem and plot the solution. Since we have essentially generated a method of lines discretization of the diffusion equation with a discontinuous initial condition, we'll use an A-L stable implicit ODE solver, Rodas5, and plot the solution at a few times:

-sol = solve(oprob, KenCarp4())
+sol = solve(oprob, Rodas5())
 times = [0., .0001, .001, .01]
 plt = plot()
 for time in times
@@ -1252,52 +1252,39 @@ 

Example of Generating a Network Programmatically

- +

Here we see the characteristic diffusion of molecules from the center of the domain, resulting in a shortening and widening of the solution as $t$ increases.

Let's now look at a stochastic chemical kinetics jump process version of the model, where β gives the probability per time each molecule can hop from its current lattice site to an individual neighboring site. We first add in the jumps, disabling regular_jumps since they are not needed, and using the minimal_jumps flag to construct a minimal representation of the needed jumps. We then construct a JumpProblem, and use the Composition-Rejection Direct method, DirectCR, to simulate the process of the molecules hopping about on the lattice:

-addjumps!(rn, build_regular_jumps=false, minimal_jumps=true)
-
- - -
-ERROR: MethodError: Cannot `convert` an object of type Tuple{} to an object of type Tuple{Union{Float64, Int64, Expr, Symbol},Vararg{Union{Float64, Int64, Expr, Symbol},N} where N}
-Closest candidates are:
-  convert(::Type{T<:Tuple{Any,Vararg{Any,N} where N}}, !Matched::T<:Tuple{Any,Vararg{Any,N} where N}) where T<:Tuple{Any,Vararg{Any,N} where N} at essentials.jl:274
-  convert(::Type{T<:Tuple{Any,Vararg{Any,N} where N}}, !Matched::Tuple{Any,Vararg{Any,N} where N}) where T<:Tuple{Any,Vararg{Any,N} where N} at essentials.jl:275
-  convert(::Type{T<:Tuple}, !Matched::CartesianIndex) where T<:Tuple at multidimensional.jl:130
-  ...
-
+addjumps!(rn, build_regular_jumps=false, minimal_jumps=true) - - -
-# make the initial condition integer valued 
+# make the initial condition integer valued 
 u₀ = zeros(Int, N)
 u₀[div(N,2)] = 10000
 
 # setup and solve the problem
 dprob = DiscreteProblem(rn, u₀, tspan, p)
-jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))
-
- - -
-ERROR: Call addjumps! before constructing JumpProblems
-
- - - -
-jsol = solve(jprob, SSAStepper(), saveat=times)
+jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))
+jsol = solve(jprob, SSAStepper(), saveat=times)
 
-
-ERROR: UndefVarError: jprob not defined
+
+retcode: Default
+Interpolation: Piecewise constant interpolation
+t: 4-element Array{Float64,1}:
+ 0.0   
+ 0.0001
+ 0.001 
+ 0.01  
+u: 4-element Array{Array{Int64,1},1}:
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0  …  0, 0, 0, 0, 0, 0, 0, 0, 0, 0]       
+ [3, 4, 2, 3, 12, 6, 5, 17, 21, 22  …  19, 13, 10, 9, 5, 5, 4, 1, 0, 0]
 
@@ -1311,22 +1298,12 @@

Example of Generating a Network Programmatically

b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel="i", ylabel="uᵢ", title=string("t = ", times[i])) plot!(b,sol(times[i])) push!(plts,b) -end -
- - -
-ERROR: UndefVarError: jsol not defined
-
- - - -
-plot(plts...)
+end
+plot(plts...)
 
- +

Similar to the ODE solutions, we see that the molecules spread out and become more and more well-mixed throughout the domain as $t$ increases. The simulation results are noisy due to the finite number of molecules present in the stochastic simulation, but since the number of molecules is large they agree well with the ODE solution at each time.


@@ -1345,57 +1322,113 @@

Getting Help

Computer Information:

Julia Version 1.1.1
-Commit 55e36cc308 (2019-05-16 04:10 UTC)
+Commit 55e36cc (2019-05-16 04:10 UTC)
 Platform Info:
-  OS: Linux (x86_64-pc-linux-gnu)
-  CPU: Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
+  OS: macOS (x86_64-apple-darwin15.6.0)
+  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
   WORD_SIZE: 64
   LIBM: libopenlibm
-  LLVM: libLLVM-6.0.1 (ORCJIT, ivybridge)
+  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
 

Package Information:

Status `~/.julia/environments/v1.1/Project.toml`
-[7e558dbc-694d-5a72-987c-6f4ebed21442] ArbNumerics 0.5.4
+[14f7f29c-3bd6-536c-9a0b-7339e30b5a3e] AMD 0.3.0
+[28f2ccd6-bb30-5033-b560-165f7b14dc2f] ApproxFun 0.11.3
+[c52e3926-4ff0-5f6e-af25-54175e0327b1] Atom 0.8.5
+[aae01518-5342-5314-be14-df237901396f] BandedMatrices 0.9.4
 [6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.2
-[be33ccc6-a3ff-5ff2-a52e-74243cff1e17] CUDAnative 2.2.0
-[3a865a2d-5b23-5a0f-bc46-62713ec82fae] CuArrays 1.0.2
-[55939f99-70c6-5e9b-8bb0-5071ed7d61fd] DecFP 0.4.8
-[abce61dc-4473-55a0-ba07-351d65e31d42] Decimals 0.4.0
-[ebbdde9d-f333-5424-9be2-dbf1e9acfb5e] DiffEqBayes 1.1.0
-[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.8.2
-[459566f4-90b8-5000-8ac3-15dfb0a30def] DiffEqCallbacks 2.5.2
-[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.9.0
-[1130ab10-4a5a-5621-a13d-e4788d82bd4c] DiffEqParamEstim 1.6.0
-[055956cb-9e8b-5191-98cc-73ae4a59e68a] DiffEqPhysics 3.1.0
+[ad839575-38b3-5650-b840-f874b8c74a25] Blink 0.10.1
+[336ed68f-0bac-5ca0-87d4-7b16caf5d00b] CSV 0.5.11
+[5d742f6a-9f54-50ce-8119-2520741973ca] CSVFiles 0.15.0
+[159f3aea-2a34-519c-b102-8c37f9878175] Cairo 0.5.6
+[3da002f7-5984-5a60-b8a6-cbb66c0b333f] ColorTypes 0.8.0
+[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.19.2
+[864edb3b-99cc-5e75-8d2d-829cb0a9cfe8] DataStructures 0.17.0
+[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 5.20.0
+[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 3.9.0
+[f3b72e0c-5b89-59e1-b016-84e28bfd966d] DiffEqDevTools 2.14.0
+[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.2.0
+[78ddff82-25fc-5f2b-89aa-309469cbf16f] DiffEqMonteCarlo 0.15.1
+[9fdde737-9c7f-55bf-ade8-46b3f136cc48] DiffEqOperators 4.1.0
+[34035eb4-37db-58ae-b003-a3202c898701] DiffEqPDEBase 0.4.0
+[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.5.1
 [6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
-[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.4.0
-[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.20.0
-[497a8b3b-efae-58df-a0af-a86822472b78] DoubleFloats 0.9.1
+[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.6.0
+[aaf54ef3-cdf8-58ed-94cc-d582ad619b94] DistributedArrays 0.6.3
+[31c24e10-a181-5473-b8eb-7969acd0382f] Distributions 0.21.1
+[e30172f5-a6a5-5a46-863b-614d45cd2de4] Documenter 0.23.2
+[5789e2e9-d7fb-5bc7-8068-2c6fae9b9549] FileIO 1.0.7
 [f6369f11-7733-5829-9624-2563aa707210] ForwardDiff 0.10.3
-[c91e804a-d5a3-530f-b6f0-dfbca275c004] Gadfly 1.0.1
-[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.18.1
-[4138dd39-2aa7-5051-a626-17a0bb65d9c8] JLD 0.9.1
+[069b7b12-0de2-55c6-9aab-29f3d0a68a2e] FunctionWrappers 1.0.0
+[28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71] GR 0.41.0
+[14197337-ba66-59df-a3e3-ca00e7dcff7a] GenericLinearAlgebra 0.1.0
+[4c0ca9eb-093a-5379-98c5-f87ac0bbbf44] Gtk 0.17.0
+[19dc6840-f33b-545b-b366-655c7e3ffd49] HCubature 1.4.0
+[f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f] HDF5 0.12.0
+[cd3eb016-35fb-5094-929b-558a96fad6f3] HTTP 0.7.1
+[09f84164-cd44-5f33-b23f-e6b0d136a0d5] HypothesisTests 0.8.0
+[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.19.0
+[42fd0dbc-a981-5370-80f2-aaf504508153] IterativeSolvers 0.8.1
+[30d91d44-8115-11e8-1d28-c19a5ac16de8] JuAFEM 0.2.0
+[f80590ac-b429-510a-8a99-e7c46989f22d] JuliaFEM 0.5.0
+[aa1ae85d-cabe-5617-a682-6adf51b2e16a] JuliaInterpreter 0.5.2
+[e5e0dc1b-0480-54bc-9374-aad01c23163d] Juno 0.7.2
+[0b1a1467-8014-51b9-945f-bf0ae24f4b77] KrylovKit 0.3.4
+[b964fa9f-0449-5b57-a5c2-d3ea65f4040f] LaTeXStrings 1.0.3
+[2b0e0bc5-e4fd-59b4-8912-456d1b03d8d7] LanguageServer 0.6.0
 [23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.8.2
-[eff96d63-e80a-5855-80a2-b1b0885c5ab7] Measurements 2.0.0
-[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.2.0
-[76087f3c-5699-56af-9a33-bf431cd00edd] NLopt 0.5.1
-[2774e3e8-f4cf-5e23-947b-6d7e65073b56] NLsolve 4.0.0
-[429524aa-4258-5aef-a3af-852621145aeb] Optim 0.18.1
-[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.8.1
-[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.1.1
-[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.25.1
+[5078a376-72f3-5289-bfd5-ec5146d43c02] LazyArrays 0.9.1
+[093fc24a-ae57-5d10-9952-331d41423f4d] LightGraphs 1.2.0
+[7a12625a-238d-50fd-b39a-03d52299707e] LinearMaps 2.5.0
+[23992714-dd62-5051-b70f-ba57cb901cac] MAT 0.5.0
+[1914dd2f-81c6-5fcd-8719-6d5c9610ff09] MacroTools 0.5.1
+[961ee093-0014-501f-94e3-6117800e7a78] ModelingToolkit 0.6.4
+[46d2c3a1-f734-5fdb-9937-b9b9aeba4221] MuladdMacro 0.2.1
+[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.2.1
+[510215fc-4207-5dde-b226-833fc4488ee2] Observables 0.2.3
+[5fb14364-9ced-5910-84b2-373655c76a03] OhMyREPL 0.5.1
+[bac558e1-5e72-5ebc-8fee-abe8a469f55d] OrderedCollections 1.1.0
+[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.14.0
+[3b7a836e-365b-5785-a47d-02c71176b4aa] PGFPlots 3.1.3
+[9b87118b-4619-50d2-8e1e-99f35a4d4d9d] PackageCompiler 0.6.4
+[65888b18-ceab-5e60-b2b9-181511a3b968] ParameterizedFunctions 4.2.1
+[d96e819e-fc66-5662-9728-84c9c7592b0a] Parameters 0.11.0
+[995b91a9-d308-5afd-9ec6-746e21dbc043] PlotUtils 0.5.8
+[58dd65bb-95f3-509e-9936-c39a10fdeae7] Plotly 0.2.0
+[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.12.5
+[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.26.1
+[f27b6e38-b328-58d1-80ce-0feddd5e7a45] Polynomials 0.5.2
+[27ebfcd6-29c5-5fa9-bf4b-fb8fc14df3ae] Primes 0.4.0
+[c46f51b8-102a-5cf2-8d2c-8597cb0e0da7] ProfileView 0.4.1
+[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.91.2
 [d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.1
-[731186ca-8d62-57ce-b412-fbd966d074cd] RecursiveArrayTools 0.20.0
+[1fd47b50-473d-5c70-9696-f719f8f3bcdc] QuadGK 2.0.3
+[e6cf234a-135c-5ec9-84dd-332b85af5143] RandomNumbers 1.3.0
+[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.5
+[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 2.1.6
+[c4c386cf-5103-5370-be45-f3a111cca3b8] Rsvg 0.2.3
+[276daf66-3868-5448-9aa4-cd146d93841b] SpecialFunctions 0.7.2
 [90137ffa-7385-5640-81b9-e52037218182] StaticArrays 0.11.0
-[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.11.0
+[2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91] StatsBase 0.32.0
+[f3b207a7-027a-5e70-b257-86293d7955fd] StatsPlots 0.10.2
+[9672c7b4-1e72-59bd-8a11-6ac3964bc41f] SteadyStateDiffEq 1.5.0
+[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.8.0
 [c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.6.1
-[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.15.0
-[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.0
-[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
-[37e2e46d-f89d-539d-b4ee-838fcccc9c8e] LinearAlgebra
-[44cfe95a-1eb2-52ea-b672-e2afdf69b78f] Pkg
+[123dc426-2d89-5057-bbad-38513e3affd8] SymEngine 0.7.0
+[e0df1984-e451-5cb5-8b61-797a481e67e3] TextParse 0.9.1
+[a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f] TimerOutputs 0.5.0
+[37b6cedf-1f77-55f8-9503-c64b63398394] Traceur 0.3.0
+[28d57a85-8fef-5791-bfe6-a80928e7c999] Transducers 0.3.1
+[39424ebd-4cf3-5550-a685-96706a953f40] TreeView 0.3.1
+[b8865327-cd53-5732-bb35-84acbb429228] UnicodePlots 1.1.0
+[1986cc42-f94f-5a68-af5c-568840ba703d] Unitful 0.16.0
+[2a06ce6d-1589-592b-9c33-f37faeaed826] UnitfulPlots 0.0.0
+[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
+[0f1e0344-ec1d-5b48-a673-e5cf874b6c29] WebIO 0.8.9
+[9abbd945-dff8-562f-b5e8-e1ebf5ef1b79] Profile
+[2f01184e-e22b-5df5-ae63-d93ebab69eaf] SparseArrays
@@ -1404,7 +1437,7 @@

Getting Help

diff --git a/html/models/04b-diffeqbio_III_steadystates.html b/html/models/04b-diffeqbio_III_steadystates.html
new file mode 100644
index 00000000..7bba7050
--- /dev/null
+++ b/html/models/04b-diffeqbio_III_steadystates.html
@@ -0,0 +1,954 @@
+DiffEqBiological Tutorial III: Steady-States and Bifurcations
+
+
+ +
+

DiffEqBiological Tutorial III: Steady-States and Bifurcations

+
Torkel Loman and Samuel Isaacson
+ +
+ +

Several types of steady state analysis can be performed for networks defined with DiffEqBiological by utilizing homotopy continuation. This allows for finding the steady states and bifurcations within a large class of systems. In this tutorial we'll go through several examples of using this functionality.

+

We start by loading the necessary packages:

+ + +
+using DiffEqBiological, Plots
+gr(); default(fmt = :png);
+
+ + + +

Steady states and stability of a biochemical reaction network.

+

Bistable switches are well-known biological motifs, characterised by the presence of two different stable steady states.

+ + +
+bistable_switch = @reaction_network begin
+    d,    (X,Y) → ∅
+    hillR(Y,v1,K1,n1), ∅ → X
+    hillR(X,v2,K2,n2), ∅ → Y
+end d v1 K1 n1 v2 K2 n2
+d = 0.01;
+v1 = 1.5; K1 = 30; n1 = 3;
+v2 = 1.; K2 = 30; n2 = 3;
+bistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2];
+
+ + + +

The steady states can be found using the steady_states function (which takes a reaction network and a set of parameter values as input). The stability of these steady states can be found using the stability function.

+ + +
+ss = steady_states(bistable_switch, bistable_switch_p)
+
+ + +
+3-element Array{Array{Float64,1},1}:
+ [31.322504001213243, 46.769050724087236]
+ [3.970283396636649, 99.76874280256095]  
+ [149.9972223365578, 0.7936945352275889]
+
+ + + +
+stability(ss,bistable_switch, bistable_switch_p)
+
+ + +
+3-element Array{Bool,1}:
+ 0
+ 1
+ 1
+
+ + +
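As a brief aside (this snippet is an editorial addition, not part of the original tutorial), the Boolean vector returned by stability can be used to filter the steady states directly, for example to keep only the stable ones:

+# Editorial sketch: logical indexing with the Bool vector returned by `stability`
+# keeps only the stable steady states found above.
+stable_ss = ss[stability(ss, bistable_switch, bistable_switch_p)]

For the parameter set above, this should leave the two stable states of the switch.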

Since the equilibration methodology is based on homotopy continuation, it is not able to handle systems with non-integer exponents or non-polynomial reaction rates. Neither of the following two systems will work.

+

This system contains a non-integer exponent:

+ + +
+rn1 = @reaction_network begin
+    p,   ∅ → X
+    hill(X,v,K,n), X → ∅
+end p v K n
+p1 = [1.,2.5,1.5,1.5]
+steady_states(rn1,p1)
+
+ + +
+ERROR: MethodError: no method matching ^(::DynamicPolynomials.PolyVar{true}, ::Float64)
+Closest candidates are:
+  ^(!Matched::Missing, ::Number) at missing.jl:94
+  ^(!Matched::Float64, ::Float64) at math.jl:781
+  ^(!Matched::Irrational{:ℯ}, ::Number) at mathconstants.jl:91
+  ...
+
+ + +

This system contains a logarithmic reaction rate:

+ + +
+rn2 = @reaction_network begin
+    p,   ∅ → X
+    log(X), X → ∅
+end p
+p2 = [1.]
+steady_states(rn2,p2)
+
+ + +
+ERROR: This reaction network does not correspond to a polynomial system. Some of the reaction rate must contain non polynomial terms.
+
+ + +

Bifurcation diagrams for biochemical reaction networks

+

Bifurcation diagrams illustrate how the steady states of a system depend on one or more parameters. They can be computed with the bifurcations function. It takes the same arguments as steady_states, with the addition of the parameter one wants to vary, and an interval over which to vary it:

+ + +
+bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.))
+plot(bif,ylabel="[X]",label="")
+plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
+
+ + + + +

The values of the second variable in the system can also be displayed by giving its index as an additional input to plot (it is the second argument, directly after the bifurcation diagram object):

+ + +
+plot(bif,2,ylabel="[Y]")
+plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
+
+ + + + +

The plot function also accepts all of the other arguments that the Plots.jl plot function accepts.

+ + +
+bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.))
+plot(bif,linewidth=1.,title="A bifurcation diagram",ylabel="Steady State concentration")
+plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
+
+ + + + +

Certain parameters, like n1, cannot be sensibly varied over a continuous interval. Instead, a discrete bifurcation diagram can be calculated with the bifurcation_grid function. Instead of an interval, the last argument is a range of numbers:

+ + +
+bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.)
+plot(bif)
+scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
+
+ + + + +

Bifurcation diagrams over two dimensions

+

In addition to the bifurcation diagrams illustrated above, where only a single parameter is varied, it is also possible to investigate the steady state properties of a system as two different parameters are varied. Due to the nature of the underlying bifurcation algorithm it is not possible to continuously vary both parameters. Instead, a set of discrete values is selected for the first parameter, and a continuous interval for the second. Then, for each discrete value of the first parameter, a normal bifurcation diagram is created over the interval given for the second parameter.

+ + +
+bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.))
+plot(bif)
+plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
+
+ + + + +

In the single-parameter case we could use a bifurcation_grid to investigate the behavior of a parameter which can only attain discrete values. In the same way, if we are interested in two parameters, both of which require integer values, we can use bifurcation_grid_2d. In our case, this is required if we want to vary both of the parameters n1 and n2:

+ + +
+bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.)
+plot(bif)
+scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"])
+
+ + + + +

The Brusselator

+

The Brusselator is a well-known reaction network which may or may not oscillate, depending on parameter values.

+ + +
+brusselator = @reaction_network begin
+    A,   ∅ → X
+    1, 2X + Y → 3X
+    B, X → Y
+    1, X → ∅
+end A B;
+A = 0.5; B = 4.;
+brusselator_p = [A, B];
+
+ + + +

The system has only one steady state, at $(X,Y)=(A,B/A)$. This fixed point becomes unstable when $B > 1+A^2$, leading to oscillations. Bifurcation diagrams can be used to determine the system's stability, and hence to look for where oscillations might appear in the Brusselator:

+ + +
+bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5))
+plot(bif,2)
+plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"])
+
+ + + + +

Here red and orange colors label unstable steady-states, while blue and cyan label stable steady-states. (In addition, orange and cyan correspond to points where at least one eigenvalue of the Jacobian has a non-zero imaginary part, while red and blue correspond to points where all eigenvalues are real.)

+

Given A=0.5, the point at which the system should become unstable is B=1.25. We can confirm this in the bifurcation diagram, and also check it numerically with the short sketch below.

+
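As a quick numerical sanity check (an editorial sketch, not part of the original tutorial; the two B values below are only illustrative), we can query the steady state and its stability just below and just above the predicted threshold using the steady_states and stability functions introduced earlier:

+# The unique steady state should sit at (X,Y) = (A, B/A), and with A = 0.5 it
+# should lose stability as B crosses 1 + A^2 = 1.25.
+for B in (1.2, 1.3)                # just below / just above the threshold
+    pvals = [0.5, B]               # parameter order is [A, B]
+    ss_B = steady_states(brusselator, pvals)
+    @show B ss_B stability(ss_B, brusselator, pvals)
+end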

We can also investigate the behavior when we vary both parameters of the system:

+ + +
+bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0))
+plot(bif)
+plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"])
+
+ + + + +
+

Getting Help

+

Have a question related to DiffEqBiological or this tutorial? Feel free to ask in the DifferentialEquations.jl Gitter. If you think you've found a bug in DiffEqBiological, or would like to request/discuss new functionality, feel free to open an issue on Github (but please check there is no related issue already open). If you've found a bug in this tutorial, or have a suggestion, feel free to open an issue on the DiffEqTutorials Github site. Or, submit a pull request to DiffEqTutorials updating the tutorial!

+
+ + +

Appendix

+

This tutorial is part of the DiffEqTutorials.jl repository, found at: https://github.com/JuliaDiffEq/DiffEqTutorials.jl

+
+

To locally run this tutorial, do the following commands:

+
using DiffEqTutorials
+DiffEqTutorials.weave_file("models","04b-diffeqbio_III_steadystates.jmd")
+
+

Computer Information:

+
+
Julia Version 1.2.0
+Commit c6da87ff4b (2019-08-20 00:03 UTC)
+Platform Info:
+  OS: macOS (x86_64-apple-darwin18.6.0)
+  CPU: Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz
+  WORD_SIZE: 64
+  LIBM: libopenlibm
+  LLVM: libLLVM-6.0.1 (ORCJIT, skylake)
+
+
+

Package Information:

+
+
Status `~/.julia/environments/v1.2/Project.toml`
+[6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf] BenchmarkTools 0.4.3
+[a93c6f00-e57d-5684-b7b6-d8193f3e46c0] DataFrames 0.19.4
+[2b5f629d-d688-5b77-993f-72d75c75574e] DiffEqBase 6.3.4
+[eb300fae-53e8-50a0-950c-e21f52c2b7e0] DiffEqBiological 4.0.1
+[c894b116-72e5-5b58-be3c-e6d8d4ac2b12] DiffEqJump 6.2.2
+[a077e3f3-b75c-5d7f-a0c6-6bc4c8ec64a9] DiffEqProblemLibrary 4.5.1
+[6d1b261a-3be8-11e9-3f2f-0b112a9a8436] DiffEqTutorials 0.1.0
+[0c46a032-eb83-5123-abaf-570d42b7fbaa] DifferentialEquations 6.8.0
+[7073ff75-c697-5162-941a-fcdaad2a7d2a] IJulia 1.20.0
+[42fd0dbc-a981-5370-80f2-aaf504508153] IterativeSolvers 0.8.1
+[23fbe1c1-3f47-55db-b15f-69d7ec21a316] Latexify 0.11.0
+[54ca160b-1b9f-5127-a996-1867f4bc2a2c] ODEInterface 0.4.6
+[47be7bcc-f1a6-5447-8b36-7eeeff7534fd] ORCA 0.3.0
+[1dea7af3-3e70-54e6-95c3-0bf5283fa5ed] OrdinaryDiffEq 5.17.2
+[f0f68f2c-4968-5e81-91da-67840de0976a] PlotlyJS 0.13.0
+[91a5bcdd-55d7-5caf-9e0b-520d859cae80] Plots 0.27.0
+[438e738f-606a-5dbb-bf0a-cddfbfd45ab0] PyCall 1.91.2
+[d330b81b-6aea-500a-939a-2ce795aea3ee] PyPlot 2.8.2
+[b4db0fb7-de2a-5028-82bf-5021f5cfa881] ReactionNetworkImporters 0.1.5
+[295af30f-e4ad-537b-8983-00126c2a3abe] Revise 2.2.0
+[789caeaf-c7a9-5a7d-9973-96adeb23e2a0] StochasticDiffEq 6.11.2
+[c3572dad-4567-51f8-b174-8c6c989267f4] Sundials 3.7.0
+[44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9] Weave 0.9.1
+[b77e0a4c-d291-57a0-90e8-8db25a27a240] InteractiveUtils
+[d6f4376e-aef5-505a-96c1-9c027394607a] Markdown
+
+ + + +
+ + + +
+
+
+ + diff --git a/notebook/advanced/02-advanced_ODE_solving.ipynb b/notebook/advanced/02-advanced_ODE_solving.ipynb new file mode 100644 index 00000000..3e01a8ee --- /dev/null +++ b/notebook/advanced/02-advanced_ODE_solving.ipynb @@ -0,0 +1,403 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Solving Stiff Equations\n### Chris Rackauckas\n\nThis tutorial is for getting into the extra features for solving stiff ordinary\ndifferential equations in an efficient manner. Solving stiff ordinary\ndifferential equations requires specializing the linear solver on properties of\nthe Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2)\nback-solves. Note that these same functions and controls also extend to stiff\nSDEs, DDEs, DAEs, etc.\n\n## Code Optimization for Differential Equations\n\n### Writing Efficient Code\n\nFor a detailed tutorial on how to optimize one's DifferentialEquations.jl code,\nplease see the\n[Optimizing DiffEq Code tutorial](http://tutorials.juliadiffeq.org/html/introduction/03-optimizing_diffeq_code.html).\n\n### Choosing a Good Solver\n\nChoosing a good solver is required for getting top notch speed. General\nrecommendations can be found on the solver page (for example, the\n[ODE Solver Recommendations](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html)).\nThe current recommendations can be simplified to a Rosenbrock method\n(`Rosenbrock23` or `Rodas5`) for smaller (<50 ODEs) problems, ESDIRK methods\nfor slightly larger (`TRBDF2` or `KenCarp4` for <2000 ODEs), and Sundials\n`CVODE_BDF` for even larger problems. `lsoda` from\n[LSODA.jl](https://github.com/rveltz/LSODA.jl) is generally worth a try.\n\nMore details on the solver to choose can be found by benchmarking. See the\n[DiffEqBenchmarks](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) to\ncompare many solvers on many problems.\n\n### Check Out the Speed FAQ\n\nSee [this FAQ](http://docs.juliadiffeq.org/latest/basics/faq.html#Performance-1)\nfor information on common pitfalls and how to improve performance.\n\n### Setting Up Your Julia Installation for Speed\n\nJulia uses an underlying BLAS implementation for its matrix multiplications\nand factorizations. This library is automatically multithreaded and accelerates\nthe internal linear algebra of DifferentialEquations.jl. However, for optimality,\nyou should make sure that the number of BLAS threads that you are using matches\nthe number of physical cores and not the number of logical cores. See\n[this issue for more details](https://github.com/JuliaLang/julia/issues/33409).\n\nTo check the number of BLAS threads, use:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ())" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "If I want to set this directly to 4 threads, I would use:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using LinearAlgebra\nLinearAlgebra.BLAS.set_num_threads(4)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Additionally, in some cases Intel's MKL might be a faster BLAS than the standard\nBLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you\ncan use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) which will accelerate\nthe linear algebra routines. 
Please see the package for the limitations.\n\n### Use Accelerator Hardware\n\nWhen possible, use GPUs. If your ODE system is small and you need to solve it\nwith very many different parameters, see the\n[ensembles interface](http://docs.juliadiffeq.org/latest/features/ensemble.html)\nand [DiffEqGPU.jl](https://github.com/JuliaDiffEq/DiffEqGPU.jl). If your problem\nis large, consider using a [CuArray](https://github.com/JuliaGPU/CuArrays.jl)\nfor the state to allow for GPU-parallelism of the internal linear algebra.\n\n## Speeding Up Jacobian Calculations\n\nWhen one is using an implicit or semi-implicit differential equation solver,\nthe Jacobian must be built at many iterations and this can be one of the most\nexpensive steps. There are two pieces that must be optimized in order to reach\nmaximal efficiency when solving stiff equations: the sparsity pattern and the\nconstruction of the Jacobian. The construction is filling the matrix\n`J` with values, while the sparsity pattern is what `J` to use.\n\nThe sparsity pattern is given by a prototype matrix, the `jac_prototype`, which\nwill be copied to be used as `J`. The default is for `J` to be a `Matrix`,\ni.e. a dense matrix. However, if you know the sparsity of your problem, then\nyou can pass a different matrix type. For example, a `SparseMatrixCSC` will\ngive a sparse matrix. Additionally, structured matrix types like `Tridiagonal`,\n`BandedMatrix` (from\n[BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)),\n`BlockBandedMatrix` (from\n[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)),\nand more can be given. DifferentialEquations.jl will internally use this matrix\ntype, making the factorizations faster by utilizing the specialized forms.\n\nFor the construction, there are 3 ways to fill `J`:\n\n- The default, which uses normal finite/automatic differentiation\n- A function `jac(J,u,p,t)` which directly computes the values of `J`\n- A `colorvec` which defines a sparse differentiation scheme.\n\nWe will now showcase how to make use of this functionality with growing complexity.\n\n### Declaring Jacobian Functions\n\nLet's solve the Rosenbrock equations:\n\n$$\\begin{align}\ndy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\\\\ndy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\\\\ndy_3 &= 3*10^7 y_{3}^2 \\\\\n\\end{align}$$\n\nIn order to reduce the Jacobian construction cost, one can describe a Jacobian\nfunction by using the `jac` argument for the `ODEFunction`. First, let's do\na standard `ODEProblem`:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using DifferentialEquations\nfunction rober(du,u,p,t)\n y₁,y₂,y₃ = u\n k₁,k₂,k₃ = p\n du[1] = -k₁*y₁+k₃*y₂*y₃\n du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃\n du[3] = k₂*y₂^2\n nothing\nend\nprob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))\nsol = solve(prob,Rosenbrock23())\n\nusing Plots\nplot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))" + ], + "metadata": {}, + "execution_count": null + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using BenchmarkTools\n@btime solve(prob)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Now we want to add the Jacobian. First we have to derive the Jacobian\n$\\frac{df_i}{du_j}$ which is `J[i,j]`. 
From this we get:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "function rober_jac(J,u,p,t)\n y₁,y₂,y₃ = u\n k₁,k₂,k₃ = p\n J[1,1] = k₁ * -1\n J[2,1] = k₁\n J[3,1] = 0\n J[1,2] = y₃ * k₃\n J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1\n J[3,2] = y₂ * 2 * k₂\n J[1,3] = k₃ * y₂\n J[2,3] = k₃ * y₂ * -1\n J[3,3] = 0\n nothing\nend\nf = ODEFunction(rober, jac=rober_jac)\nprob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))\n\n@btime solve(prob_jac)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Automatic Derivation of Jacobian Functions\n\nBut that was hard! If you want to take the symbolic Jacobian of numerical\ncode, we can make use of [ModelingToolkit.jl](https://github.com/JuliaDiffEq/ModelingToolkit.jl)\nto symbolicify the numerical code and do the symbolic calculation and return\nthe Julia code for this." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using ModelingToolkit\nde = modelingtoolkitize(prob)\nModelingToolkit.generate_jacobian(de...)[2] # Second is in-place" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "which outputs:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + ":((##MTIIPVar#376, u, p, t)->begin\n #= C:\\Users\\accou\\.julia\\packages\\ModelingToolkit\\czHtj\\src\\utils.jl:65 =#\n #= C:\\Users\\accou\\.julia\\packages\\ModelingToolkit\\czHtj\\src\\utils.jl:66 =#\n let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3])\n ##MTIIPVar#376[1] = α₁ * -1\n ##MTIIPVar#376[2] = α₁\n ##MTIIPVar#376[3] = 0\n ##MTIIPVar#376[4] = x₃ * α₃\n ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1\n ##MTIIPVar#376[6] = x₂ * 2 * α₂\n ##MTIIPVar#376[7] = α₃ * x₂\n ##MTIIPVar#376[8] = α₃ * x₂ * -1\n ##MTIIPVar#376[9] = 0\n end\n #= C:\\Users\\accou\\.julia\\packages\\ModelingToolkit\\czHtj\\src\\utils.jl:67 =#\n nothing\n end)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Now let's use that to give the analytical solution Jacobian:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "jac = eval(ModelingToolkit.generate_jacobian(de...)[2])\nf = ODEFunction(rober, jac=jac)\nprob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Declaring a Sparse Jacobian\n\nJacobian sparsity is declared by the `jac_prototype` argument in the `ODEFunction`.\nNote that you should only do this if the sparsity is high, for example, 0.1%\nof the matrix is non-zeros, otherwise the overhead of sparse matrices can be higher\nthan the gains from sparse differentiation!\n\nBut as a demonstration, let's build a sparse matrix for the Rober problem. 
We\ncan do this by gathering the `I` and `J` pairs for the non-zero components, like:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "I = [1,2,1,2,3,1,2]\nJ = [1,1,2,2,2,3,3]\nusing SparseArrays\njac_prototype = sparse(I,J,1.0)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Now this is the sparse matrix prototype that we want to use in our solver, which\nwe then pass like:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype)\nprob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Automatic Sparsity Detection\n\nOne of the useful companion tools for DifferentialEquations.jl is\n[SparsityDetection.jl](https://github.com/JuliaDiffEq/SparsityDetection.jl).\nThis allows for automatic declaration of Jacobian sparsity types. To see this\nin action, let's look at the 2-dimensional Brusselator equation:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "const N = 32\nconst xyd_brusselator = range(0,stop=1,length=N)\nbrusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5.\nlimit(a, N) = a == N+1 ? 1 : a == 0 ? N : a\nfunction brusselator_2d_loop(du, u, p, t)\n A, B, alpha, dx = p\n alpha = alpha/dx^2\n @inbounds for I in CartesianIndices((N, N))\n i, j = Tuple(I)\n x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]]\n ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N)\n du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) +\n B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t)\n du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) +\n A*u[i,j,1] - u[i,j,1]^2*u[i,j,2]\n end\nend\np = (3.4, 1., 10., step(xyd_brusselator))" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Given this setup, we can give and example `input` and `output` and call `sparsity!`\non our function with the example arguments and it will kick out a sparse matrix\nwith our pattern, that we can turn into our `jac_prototype`." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using SparsityDetection, SparseArrays\ninput = rand(32,32,2)\noutput = similar(input)\nsparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0)\njac_sparsity = Float64.(sparse(sparsity_pattern))" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Let's double check what our sparsity pattern looks like:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using Plots\nspy(jac_sparsity,markersize=1,colorbar=false,color=:deep)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "That's neat, and would be tedius to build by hand! 
Now we just pass it to the\n`ODEFunction` like as before:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Build the `ODEProblem`:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "function init_brusselator_2d(xyd)\n N = length(xyd)\n u = zeros(N, N, 2)\n for I in CartesianIndices((N, N))\n x = xyd[I[1]]\n y = xyd[I[2]]\n u[I,1] = 22*(y*(1-y))^(3/2)\n u[I,2] = 27*(x*(1-x))^(3/2)\n end\n u\nend\nu0 = init_brusselator_2d(xyd_brusselator)\nprob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop,\n u0,(0.,11.5),p)\n\nprob_ode_brusselator_2d_sparse = ODEProblem(f,\n u0,(0.,11.5),p)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Now let's see how the version with sparsity compares to the version without:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "@btime solve(prob_ode_brusselator_2d,save_everystep=false)\n@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Declaring Color Vectors for Fast Construction\n\nIf you cannot directly define a Jacobian function, you can use the `colorvec`\nto speed up the Jacobian construction. What the `colorvec` does is allows for\ncalculating multiple columns of a Jacobian simultaniously by using the sparsity\npattern. An explanation of matrix coloring can be found in the\n[MIT 18.337 Lecture Notes](https://mitmath.github.io/18337/lecture9/stiff_odes).\n\nTo perform general matrix coloring, we can use\n[SparseDiffTools.jl](https://github.com/JuliaDiffEq/SparseDiffTools.jl). For\nexample, for the Brusselator equation:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using SparseDiffTools\ncolorvec = matrix_colors(jac_sparsity)\n@show maximum(colorvec)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "This means that we can now calculate the Jacobian in 12 function calls. This is\na nice reduction from 2048 using only automated tooling! To now make use of this\ninside of the ODE solver, you simply need to declare the colorvec:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity,\n colorvec=colorvec)\nprob_ode_brusselator_2d_sparse = ODEProblem(f,\n init_brusselator_2d(xyd_brusselator),\n (0.,11.5),p)\n@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Notice the massive speed enhancement!\n\n## Defining Linear Solver Routines and Jacobian-Free Newton-Krylov\n\nA completely different way to optimize the linear solvers for large sparse\nmatrices is to use a Krylov subpsace method. This requires choosing a linear\nsolver for changing to a Krylov method. Optionally, one can use a Jacobian-free\noperator to reduce the memory requirements.\n\n### Declaring a Jacobian-Free Newton-Krylov Implementation\n\nTo swap the linear solver out, we use the `linsolve` command and choose the\nGMRES linear solver." 
+ ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)\n@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "For more information on linear solver choices, see the\n[linear solver documentation](http://docs.juliadiffeq.org/latest/features/linear_nonlinear.html).\n\nOn this problem, handling the sparsity correctly seemed to give much more of a\nspeedup than going to a Krylov approach, but that can be dependent on the problem\n(and whether a good preconditioner is found).\n\nWe can also enhance this by using a Jacobian-Free implementation of `f'(x)*v`.\nTo define the Jacobian-Free operator, we can use\n[DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to generate\nan operator `JacVecOperator` such that `Jv*v` performs `f'(x)*v` without building\nthe Jacobian matrix." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using DiffEqOperators\nJv = JacVecOperator(brusselator_2d_loop,u0,p,0.0)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "and then we can use this by making it our `jac_prototype`:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv)\nprob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p)\n@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Adding a Preconditioner\n\nThe [linear solver documentation](http://docs.juliadiffeq.org/latest/features/linear_nonlinear.html#IterativeSolvers.jl-Based-Methods-1)\nshows how you can add a preconditioner to the GMRES. For example, you can\nuse packages like [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl)\nto add an algebraic multigrid (AMG) or [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl)\nfor an incomplete LU-factorization (iLU)." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using AlgebraicMultigrid\npc = aspreconditioner(ruge_stuben(jac_sparsity))\n@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "## Using Structured Matrix Types\n\nIf your sparsity pattern follows a specific structure, for example a banded\nmatrix, then you can declare `jac_prototype` to be of that structure and then\nadditional optimizations will come for free. Note that in this case, it is\nnot necessary to provide a `colorvec` since the color vector will be analytically\nderived from the structure of the matrix.\n\nThe matrices which are allowed are those which satisfy the\n[ArrayInterface.jl](https://github.com/JuliaDiffEq/ArrayInterface.jl) interface\nfor automatically-colorable matrices. 
These include:\n\n- Bidiagonal\n- Tridiagonal\n- SymTridiagonal\n- BandedMatrix ([BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl))\n- BlockBandedMatrix ([BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl))\n\nMatrices which do not satisfy this interface can still be used, but the matrix\ncoloring will not be automatic, and an appropriate linear solver may need to\nbe given (otherwise it will default to attempting an LU-decomposition).\n\n## Sundials-Specific Handling\n\nWhile much of the setup makes the transition to using Sundials automatic, there\nare some differences between the pure Julia implementations and the Sundials\nimplementations which must be taken note of. These are all detailed in the\n[Sundials solver documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1),\nbut here we will highlight the main details which one should make note of.\n\nDefining a sparse matrix and a Jacobian for Sundials works just like any other\npackage. The core difference is in the choice of the linear solver. With Sundials,\nthe linear solver choice is done with a Symbol in the `linear_solver` from a\npreset list. Particular choices of note are `:Band` for a banded matrix and\n`:GMRES` for using GMRES. If you are using Sundials, `:GMRES` will not require\ndefining the JacVecOperator, and instead will always make use of a Jacobian-Free\nNewton Krylov (with numerical differentiation). Thus on this problem we could do:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using Sundials\n# Sparse Version\n@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false)\n# GMRES Version: Doesn't require any extra stuff!\n@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Details for setting up a preconditioner with Sundials can be found at the\n[Sundials solver page](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1).\n\n## Handling Mass Matrices\n\nInstead of just defining an ODE as $u' = f(u,p,t)$, it can be common to express\nthe differential equation in the form with a mass matrix:\n\n$$Mu' = f(u,p,t)$$\n\nwhere $M$ is known as the mass matrix. Let's solve the Robertson equation.\nAt the top we wrote this equation as:\n\n$$\\begin{align}\ndy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\\\\ndy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\\\\ndy_3 &= 3*10^7 y_{3}^2 \\\\\n\\end{align}$$\n\nBut we can instead write this with a conservation relation:\n\n$$\\begin{align}\ndy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\\\\ndy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\\\\n1 &= y_{1} + y_{2} + y_{3} \\\\\n\\end{align}$$\n\nIn this form, we can write this as a mass matrix ODE where $M$ is singular\n(this is another form of a differential-algebraic equation (DAE)). Here, the\nlast row of `M` is just zero. We can implement this form as:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using DifferentialEquations\nfunction rober(du,u,p,t)\n y₁,y₂,y₃ = u\n k₁,k₂,k₃ = p\n du[1] = -k₁*y₁+k₃*y₂*y₃\n du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃\n du[3] = y₁ + y₂ + y₃ - 1\n nothing\nend\nM = [1. 0 0\n 0 1. 
0\n 0 0 0]\nf = ODEFunction(rober,mass_matrix=M)\nprob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))\nsol = solve(prob_mm,Rodas5())\n\nplot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Note that if your mass matrix is singular, i.e. your system is a DAE, then you\nneed to make sure you choose\n[a solver that is compatible with DAEs](http://docs.juliadiffeq.org/latest/solvers/dae_solve.html#Full-List-of-Methods-1)" + ], + "metadata": {} + } + ], + "nbformat_minor": 2, + "metadata": { + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.2.0" + }, + "kernelspec": { + "name": "julia-1.2", + "display_name": "Julia 1.2.0", + "language": "julia" + } + }, + "nbformat": 4 +} diff --git a/notebook/models/03-diffeqbio_I_introduction.ipynb b/notebook/models/03-diffeqbio_I_introduction.ipynb index 737eaef8..56e79229 100644 --- a/notebook/models/03-diffeqbio_I_introduction.ipynb +++ b/notebook/models/03-diffeqbio_I_introduction.ipynb @@ -59,7 +59,7 @@ "outputs": [], "cell_type": "code", "source": [ - "latexify(repressilator)" + "latexify(repressilator, cdot=false)" ], "metadata": {}, "execution_count": null @@ -172,7 +172,23 @@ { "cell_type": "markdown", "source": [ - "The corresponding Chemical Langevin Equation SDE is then\n\n$$\ndX_t = \\left(c_1 X - c_2 X + c_3 \\right) dt + \\left( \\sqrt{c_1 X} - \\sqrt{c_2 X} + \\sqrt{c_3} \\right)dW_t,\n$$\n\nwhere $W_t$ denotes a standard Brownian Motion. We can solve the CLE SDE model\nby creating an SDEProblem and solving it similar to what we did for ODEs above:" + "The corresponding Chemical Langevin Equation SDE is then" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "latexify(bdp, noise=true, cdot=false)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "where each $W_i(t)$ denotes an independent Brownian Motion. We can solve the CLE\nSDE model by creating an `SDEProblem` and solving it similar to what we did for\nODEs above:" ], "metadata": {} }, @@ -196,7 +212,7 @@ "outputs": [], "cell_type": "code", "source": [ - "latexify(jacobianexprs(repressilator))" + "latexify(jacobianexprs(repressilator), cdot=false)" ], "metadata": {}, "execution_count": null @@ -215,11 +231,11 @@ "file_extension": ".jl", "mimetype": "application/julia", "name": "julia", - "version": "1.1.1" + "version": "1.2.0" }, "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", + "name": "julia-1.2", + "display_name": "Julia 1.2.0", "language": "julia" } }, diff --git a/notebook/models/04-diffeqbio_II_networkproperties.ipynb b/notebook/models/04-diffeqbio_II_networkproperties.ipynb index 16c62cf1..f201d836 100644 --- a/notebook/models/04-diffeqbio_II_networkproperties.ipynb +++ b/notebook/models/04-diffeqbio_II_networkproperties.ipynb @@ -1,558 +1,6096 @@ { - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# DiffEqBiological Tutorial II: Network Properties API\n### Samuel Isaacson\n\nThe [DiffEqBiological\nAPI](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides a\ncollection of functions for easily accessing network properties, and for\nincrementally building and extending a network. 
In this tutorial we'll go\nthrough the API, and then illustrate how to programmatically construct a\nnetwork.\n\nWe'll illustrate the API using a toggle-switch like network that contains a\nvariety of different reaction types:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "using DifferentialEquations, DiffEqBiological, Latexify, Plots\nfmt = :svg\npyplot(fmt=fmt)\nrn = @reaction_network begin\n hillr(D₂,α,K,n), ∅ --> m₁\n hillr(D₁,α,K,n), ∅ --> m₂\n (δ,γ), m₁ ↔ ∅\n (δ,γ), m₂ ↔ ∅\n β, m₁ --> m₁ + P₁\n β, m₂ --> m₂ + P₂\n μ, P₁ --> ∅\n μ, P₂ --> ∅\n (k₊,k₋), 2P₁ ↔ D₁ \n (k₊,k₋), 2P₂ ↔ D₂\n (k₊,k₋), P₁+P₂ ↔ T\nend α K n δ γ β μ k₊ k₋;" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "This corresponds to the chemical reaction network given by" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "latexify(rn; env=:chemical)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "---\n## Network Properties\n[Basic\nproperties](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Basic-properties-1)\nof the generated network include the `speciesmap` and `paramsmap` functions we\nexamined in the last tutorial, along with the corresponding `species` and\n`params` functions:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "species(rn)" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "params(rn)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The numbers of species, parameters and reactions can be accessed using\n`numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`.\n\nA number of functions are available to access [properties of\nreactions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Properties-1)\nwithin the generated network, including `substrates`, `products`, `dependents`,\n`ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`,\n`productsymstoich`, and `netstoich`. Each of these functions takes two\narguments, the reaction network `rn` and the index of the reaction to query\ninformation about. For example, to find the substrate symbols and their\ncorresponding stoichiometries for the 11th reaction, `2P₁ --> D₁`, we would use" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "substratesymstoich(rn, 11)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Broadcasting works on all these functions, allowing the construction of a vector\nholding the queried information across all reactions, i.e." - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "substratesymstoich.(rn, 1:numreactions(rn))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "To see the net stoichiometries for all reactions we would use" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "netstoich.(rn, 1:numreactions(rn))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here the first integer in each pair corresponds to the index of the species\n(with symbol `species(rn)[index]`). The second integer corresponds to the net\nstoichiometric coefficient of the species within the reaction. 
`substratestoich`\nand `productstoich` are defined similarly. \n\nSeveral functions are also provided that calculate different types of\n[dependency\ngraphs](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Dependency-Graphs-1).\nThese include `rxtospecies_depgraph`, which provides a mapping from reaction\nindex to the indices of species whose population changes when the reaction\noccurs:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rxtospecies_depgraph(rn)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here the last row indicates that the species with indices `[3,4,7]` will change\nvalues when the reaction `T --> P₁ + P₂` occurs. To confirm these are the\ncorrect species we can look at" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "species(rn)[[3,4,7]]" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The `speciestorx_depgraph` similarly provides a mapping from species to reactions \nfor which their *rate laws* depend on that species. These correspond to all reactions\nfor which the given species is in the `dependent` set of the reaction. We can verify this\nfor the first species, `m₁`:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "speciestorx_depgraph(rn)[1]" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn)))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Finally, `rxtorx_depgraph` provides a mapping that shows when a given reaction\noccurs, which other reactions have rate laws that involve species whose value\nwould have changed:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rxtorx_depgraph(rn)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "#### Note on Using Network Property API Functions\nMany basic network query and reaction property functions are simply accessors,\nreturning information that is already stored within the generated\n`reaction_network`. For these functions, modifying the returned data structures\nmay lead to inconsistent internal state within the network. As such, they should\nbe used for accessing, but not modifying, network properties. The [API\ndocumentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html)\nindicates which functions return newly allocated data structures and which\nreturn data stored within the `reaction_network`.\n\n---\n## Incremental Construction of Networks\nThe `@reaction_network` macro is monolithic, in that it not only constructs and\nstores basic network properties such as the reaction stoichiometries, but also\ngenerates **everything** needed to immediately solve ODE, SDE and jump models\nusing the network. This includes Jacobian functions, noise functions, and jump\nfunctions for each reaction. While this allows for a compact interface to the\nDifferentialEquations.jl solvers, it can also be computationally expensive for\nlarge networks, where a user may only wish to solve one type of problem and/or\nhave fine-grained control over what is generated. 
In addition, some types of\nreaction network structures are more amenable to being constructed\nprogrammatically, as opposed to writing out all reactions by hand within one\nmacro. For these reasons DiffEqBiological provides two additional macros that\nonly *initially* setup basic reaction network properties, and which can be\nextended through a programmatic interface: `@min_reaction_network` and\n`@empty_reaction_network`. We now give an introduction to constructing these\nmore minimal network representations, and how they can be programmatically\nextended. See also the relevant [API\nsection](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1).\n\nThe `@min_reaction_network` macro works identically to the `@reaction_network`\nmacro, but the generated network will only be complete with respect to its\nrepresentation of chemical network properties (i.e. species, parameters and\nreactions). No ODE, SDE or jump models are generated during the macro call. It\ncan subsequently be extended with the addition of new species, parameters or\nreactions. The `@empty_reaction_network` allocates an empty network structure\nthat can also be extended using the programmatic interface. For example, consider\na partial version of the toggle-switch like network we defined above:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rnmin = @min_reaction_network begin\n (δ,γ), m₁ ↔ ∅\n (δ,γ), m₂ ↔ ∅\n β, m₁ --> m₁ + P₁\n β, m₂ --> m₂ + P₂\n μ, P₁ --> ∅\n μ, P₂ --> ∅\nend δ γ β μ;" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here we have left out the first two, and last three, reactions from the original\n`reaction_network`. To expand the network until it is functionally equivalent to\nthe original model we add back in the missing species, parameters, and *finally*\nthe missing reactions. Note, it is required that species and parameters be\ndefined before any reactions using them are added. The necessary network\nextension functions are given by `addspecies!`, `addparam!` and `addreaction!`,\nand described in the\n[API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). To complete `rnmin` we first add the relevant\nspecies:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "addspecies!(rnmin, :D₁)\naddspecies!(rnmin, :D₂)\naddspecies!(rnmin, :T)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Next we add the needed parameters" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "addparam!(rnmin, :α)\naddparam!(rnmin, :K)\naddparam!(rnmin, :n)\naddparam!(rnmin, :k₊)\naddparam!(rnmin, :k₋)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Note, both `addspecies!` and `addparam!` also accept strings encoding the\nvariable names (which are then converted to `Symbol`s internally).\n\nWe are now ready to add the missing reactions. 
The API provides two forms of the\n`addreaction!` function, one takes expressions analogous to what one would write\nin the macro:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂))\naddreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂))\naddreaction!(rnmin, :k₊, :(2P₁ --> D₁))\naddreaction!(rnmin, :k₋, :(D₁ --> 2P₁))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The rate can be an expression or symbol as above, but can also just be a\nnumeric value. The second form of `addreaction!` takes tuples of\n`Pair{Symbol,Int}` that encode the stoichiometric coefficients of substrates and\nreactants:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich)\naddreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,))\naddreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,))\naddreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Let's check that `rn` and `rnmin` have the same set of species:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "setdiff(species(rn), species(rnmin))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "the same set of params:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "setdiff(params(rn), params(rnmin))" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "and the final reaction has the same substrates, reactions, and rate expression:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rxidx = numreactions(rn)\nsetdiff(substrates(rn, rxidx), substrates(rnmin, rxidx))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "setdiff(products(rn, rxidx), products(rnmin, rxidx))" - ], - "metadata": {}, - "execution_count": null - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "---\n## Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps\nOnce a network generated from `@min_reaction_network` or\n`@empty_reaction_network` has had all the associated species, parameters and\nreactions filled in, corresponding ODE, SDE or jump models can be constructed.\nThe relevant API functions are `addodes!`, `addsdes!` and `addjumps!`. One\nbenefit to contructing models with these functions is that they offer more\nfine-grained control over what actually gets constructed. For example,\n`addodes!` has the optional keyword argument, `build_jac`, which if set to\n`false` will disable construction of symbolic Jacobians and functions for\nevaluating Jacobians. For large networks this can give a significant speed-up in\nthe time required for constructing an ODE model. 
Each function and its\nassociated keyword arguments are described in the API section, [Functions to add\nODEs, SDEs or Jumps to a\nNetwork](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1).\n\nLet's extend `rnmin` to include the needed functions for use in ODE\nsolvers:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "addodes!(rnmin)" - ], - "metadata": {}, - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "The [Generated Functions for\nModels](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Functions-for-Models-1)\nsection of the API shows what functions have been generated. For ODEs these\ninclude `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)`\nwhich evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. For\neach generated function, the corresponding expressions from which it was\ngenerated can be retrieved using accessors from the [Generated\nExpressions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Expressions-1)\nsection of the API. The equations within `du` can be retrieved using the\n`odeexprs(rnmin)` function. For example:" - ], - "metadata": {} - }, - { - "outputs": [], - "cell_type": "code", - "source": [ - "odeexprs(rnmin)" - ], - "metadata": {}, - "execution_count": null - }, + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DiffEqBiological Tutorial II: Network Properties API\n", + "### Samuel Isaacson\n", + "\n", + "The [DiffEqBiological\n", + "API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides a\n", + "collection of functions for easily accessing network properties, and for\n", + "incrementally building and extending a network. 
In this tutorial we'll go\n", + "through the API, and then illustrate how to programmatically construct a\n", + "network.\n", + "\n", + "We'll illustrate the API using a toggle-switch like network that contains a\n", + "variety of different reaction types:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "using DifferentialEquations, DiffEqBiological, Latexify, Plots\n", + "fmt = :svg\n", + "pyplot(fmt=fmt)\n", + "rn = @reaction_network begin\n", + " hillr(D₂,α,K,n), ∅ --> m₁\n", + " hillr(D₁,α,K,n), ∅ --> m₂\n", + " (δ,γ), m₁ ↔ ∅\n", + " (δ,γ), m₂ ↔ ∅\n", + " β, m₁ --> m₁ + P₁\n", + " β, m₂ --> m₂ + P₂\n", + " μ, P₁ --> ∅\n", + " μ, P₂ --> ∅\n", + " (k₊,k₋), 2P₁ ↔ D₁ \n", + " (k₊,k₋), 2P₂ ↔ D₂\n", + " (k₊,k₋), P₁+P₂ ↔ T\n", + "end α K n δ γ β μ k₊ k₋;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This corresponds to the chemical reaction network given by" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "Using Latexify we can see the ODEs themselves to compare with these expressions:" + "data": { + "text/latex": [ + "\\begin{align}\n", + "\\require{mhchem}\n", + "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\\\\n", + "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\\\\n", + "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", + "\\ce{ m_{2} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", + "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", + "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", + "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", + "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", + "\\ce{ 2 \\cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\\\\n", + "\\ce{ 2 \\cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\\\\n", + "\\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T}\n", + "\\end{align}\n" ], - "metadata": {} - }, + "text/plain": [ + "L\"\\begin{align}\n", + "\\require{mhchem}\n", + "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}}] m_{1}}\\\\\n", + "\\ce{ \\varnothing &->[\\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}}] m_{2}}\\\\\n", + "\\ce{ m_{1} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", + "\\ce{ m_{2} &<=>[\\delta][\\gamma] \\varnothing}\\\\\n", + "\\ce{ m_{1} &->[\\beta] m_{1} + P_{1}}\\\\\n", + "\\ce{ m_{2} &->[\\beta] m_{2} + P_{2}}\\\\\n", + "\\ce{ P_{1} &->[\\mu] \\varnothing}\\\\\n", + "\\ce{ P_{2} &->[\\mu] \\varnothing}\\\\\n", + "\\ce{ 2 \\cdot P_1 &<=>[k_{+}][k_{-}] D_{1}}\\\\\n", + "\\ce{ 2 \\cdot P_2 &<=>[k_{+}][k_{-}] D_{2}}\\\\\n", + "\\ce{ P_{1} + P_{2} &<=>[k_{+}][k_{-}] T}\n", + "\\end{align}\n", + "\"" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "latexify(rn; env=:chemical)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Network Properties\n", + "[Basic\n", + "properties](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Basic-properties-1)\n", + "of the generated network include the `speciesmap` and `paramsmap` functions we\n", + "examined in the last tutorial, along with the corresponding `species` and\n", + "`params` functions:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "latexify(rnmin)" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "7-element Array{Symbol,1}:\n", + " :m₁\n", + " 
:m₂\n", + " :P₁\n", + " :P₂\n", + " :D₁\n", + " :D₂\n", + " :T " + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "species(rn)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "For ODEs two other functions are generated by `addodes!`. `jacfun(rnmin)` will\nreturn the generated Jacobian evaluation function, `fjac(dJ,u,p,t)`, which given\nthe current solution `u` evaluates the Jacobian within `dJ`.\n`jacobianexprs(rnmin)` gives the corresponding matrix of expressions, which can\nbe used with Latexify to see the Jacobian:" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "9-element Array{Symbol,1}:\n", + " :α \n", + " :K \n", + " :n \n", + " :δ \n", + " :γ \n", + " :β \n", + " :μ \n", + " :k₊\n", + " :k₋" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "params(rn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The numbers of species, parameters and reactions can be accessed using\n", + "`numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`.\n", + "\n", + "A number of functions are available to access [properties of\n", + "reactions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Properties-1)\n", + "within the generated network, including `substrates`, `products`, `dependents`,\n", + "`ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`,\n", + "`productsymstoich`, and `netstoich`. Each of these functions takes two\n", + "arguments, the reaction network `rn` and the index of the reaction to query\n", + "information about. For example, to find the substrate symbols and their\n", + "corresponding stoichiometries for the 11th reaction, `2P₁ --> D₁`, we would use" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "latexify(jacobianexprs(rnmin))" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "1-element Array{DiffEqBiological.ReactantStruct,1}:\n", + " DiffEqBiological.ReactantStruct(:P₁, 2)" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "substratesymstoich(rn, 11)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Broadcasting works on all these functions, allowing the construction of a vector\n", + "holding the queried information across all reactions, i.e." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "`addodes!` also generates a function that evaluates the Jacobian of the ODE\nderivative functions with respect to the parameters. `paramjacfun(rnmin)` then\nreturns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which\ngiven the current solution `u` evaluates the Jacobian matrix with respect to\nparameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an\n[`ODEFunction`](http://docs.juliadiffeq.org/latest/features/performance_overloads.html)\nrepresentation of the ODEs is available from `odefun(rnmin)`. \n\n`addsdes!` and `addjumps!` work similarly to complete the network for use in\nStochasticDiffEq and DiffEqJump solvers. 
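As a rough sketch of how this is used (assuming `addodes!(rnmin)` has been called as above; the placeholder values below are illustrative, not from the tutorial):

```julia
# Hypothetical sketch: pass the generated ODEFunction straight to a solver.
# Placeholder initial condition and parameter values are used here; in real
# use they should be ordered consistently with speciesmap(rnmin) and
# paramsmap(rnmin).
u0    = ones(numspecies(rnmin))
pvals = ones(numparams(rnmin))
oprob = ODEProblem(odefun(rnmin), u0, (0.0, 10.0), pvals)
osol  = solve(oprob)
```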
\n\n#### Note on Using Generated Function and Expression API Functions\nThe generated functions and expressions accessible through the API require first\ncalling the appropriate `addodes!`, `addsdes` or `addjumps` function. These are\nresponsible for actually constructing the underlying functions and expressions.\nThe API accessors simply return already constructed functions and expressions\nthat are stored within the `reaction_network` structure.\n\n---\n## Example of Generating a Network Programmatically\nFor a user directly typing in a reaction network, it is generally easier to use\nthe `@min_reaction_network` or `@reaction_network` macros to fully specify\nreactions. However, for large, structured networks it can be much easier to\ngenerate the network programmatically. For very large networks, with tens of\nthousands of reactions, the form of `addreaction!` that uses stoichiometric\ncoefficients should be preferred as it offers substantially better performance.\nTo put together everything we've seen, let's generate the network corresponding\nto a 1D continuous time random walk, approximating the diffusion of molecules\nwithin an interval.\n\nThe basic \"reaction\" network we wish to study is \n\n$$\nu_1 \\leftrightarrows u_2 \\leftrightarrows u_3 \\cdots \\leftrightarrows u_{N}\n$$\n\nfor $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll\nassume the rate molecules hop from their current site to any particular neighbor\nis just $h^{-2}$. We can interpret this hopping process as a collection of\n$2N-2$ \"reactions\", with the form $u_i \\to u_j$ for $j=i+1$ or $j=i-1$. We construct\nthe corresponding reaction network as follows. First we set values for the basic\nparameters:" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "16-element Array{Array{DiffEqBiological.ReactantStruct,1},1}:\n", + " [] \n", + " [] \n", + " [ReactantStruct(:m₁, 1)] \n", + " [] \n", + " [ReactantStruct(:m₂, 1)] \n", + " [] \n", + " [ReactantStruct(:m₁, 1)] \n", + " [ReactantStruct(:m₂, 1)] \n", + " [ReactantStruct(:P₁, 1)] \n", + " [ReactantStruct(:P₂, 1)] \n", + " [ReactantStruct(:P₁, 2)] \n", + " [ReactantStruct(:D₁, 1)] \n", + " [ReactantStruct(:P₂, 2)] \n", + " [ReactantStruct(:D₂, 1)] \n", + " [ReactantStruct(:P₁, 1), ReactantStruct(:P₂, 1)]\n", + " [ReactantStruct(:T, 1)] " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "substratesymstoich.(rn, 1:numreactions(rn))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To see the net stoichiometries for all reactions we would use" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "N = 64\nh = 1 / N" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "16-element Array{Array{Pair{Int64,Int64},1},1}:\n", + " [1=>1] \n", + " [2=>1] \n", + " [1=>-1] \n", + " [1=>1] \n", + " [2=>-1] \n", + " [2=>1] \n", + " [3=>1] \n", + " [4=>1] \n", + " [3=>-1] \n", + " [4=>-1] \n", + " [3=>-2, 5=>1] \n", + " [3=>2, 5=>-1] \n", + " [4=>-2, 6=>1] \n", + " [4=>2, 6=>-1] \n", + " [3=>-1, 4=>-1, 7=>1]\n", + " [3=>1, 4=>1, 7=>-1] " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "netstoich.(rn, 1:numreactions(rn))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here the first integer in each pair corresponds to the index of the species\n", + 
"(with symbol `species(rn)[index]`). The second integer corresponds to the net\n", + "stoichiometric coefficient of the species within the reaction. `substratestoich`\n", + "and `productstoich` are defined similarly. \n", + "\n", + "Several functions are also provided that calculate different types of\n", + "[dependency\n", + "graphs](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Dependency-Graphs-1).\n", + "These include `rxtospecies_depgraph`, which provides a mapping from reaction\n", + "index to the indices of species whose population changes when the reaction\n", + "occurs:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "then we create an empty network, and add each species" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "16-element Array{Array{Int64,1},1}:\n", + " [1] \n", + " [2] \n", + " [1] \n", + " [1] \n", + " [2] \n", + " [2] \n", + " [3] \n", + " [4] \n", + " [3] \n", + " [4] \n", + " [3, 5] \n", + " [3, 5] \n", + " [4, 6] \n", + " [4, 6] \n", + " [3, 4, 7]\n", + " [3, 4, 7]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "rxtospecies_depgraph(rn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here the last row indicates that the species with indices `[3,4,7]` will change\n", + "values when the reaction `T --> P₁ + P₂` occurs. To confirm these are the\n", + "correct species we can look at" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "rn = @empty_reaction_network\n\nfor i = 1:N\n addspecies!(rn, Symbol(:u, i))\nend" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "3-element Array{Symbol,1}:\n", + " :P₁\n", + " :P₂\n", + " :T " + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "species(rn)[[3,4,7]]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `speciestorx_depgraph` similarly provides a mapping from species to reactions \n", + "for which their *rate laws* depend on that species. These correspond to all reactions\n", + "for which the given species is in the `dependent` set of the reaction. 
We can verify this\n", + "for the first species, `m₁`:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "We next add one parameter `β`, which we will set equal to the hopping rate \nof molecules, $h^{-2}$:" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "2-element Array{Int64,1}:\n", + " 3\n", + " 7" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "speciestorx_depgraph(rn)[1]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "addparam!(rn, :β)" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "2-element Array{Int64,1}:\n", + " 3\n", + " 7" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "findall(depset -> in(:m₁, depset), dependents.(rn, 1:numreactions(rn)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, `rxtorx_depgraph` provides a mapping that shows when a given reaction\n", + "occurs, which other reactions have rate laws that involve species whose value\n", + "would have changed:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "Finally, we add in the $2N-2$ possible hopping reactions:" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "16-element Array{Array{Int64,1},1}:\n", + " [1, 3, 7] \n", + " [2, 5, 8] \n", + " [3, 7] \n", + " [3, 4, 7] \n", + " [5, 8] \n", + " [5, 6, 8] \n", + " [7, 9, 11, 15] \n", + " [8, 10, 13, 15] \n", + " [9, 11, 15] \n", + " [10, 13, 15] \n", + " [2, 9, 11, 12, 15] \n", + " [2, 9, 11, 12, 15] \n", + " [1, 10, 13, 14, 15] \n", + " [1, 10, 13, 14, 15] \n", + " [9, 10, 11, 13, 15, 16]\n", + " [9, 10, 11, 13, 15, 16]" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "rxtorx_depgraph(rn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Note on Using Network Property API Functions\n", + "Many basic network query and reaction property functions are simply accessors,\n", + "returning information that is already stored within the generated\n", + "`reaction_network`. For these functions, modifying the returned data structures\n", + "may lead to inconsistent internal state within the network. As such, they should\n", + "be used for accessing, but not modifying, network properties. The [API\n", + "documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html)\n", + "indicates which functions return newly allocated data structures and which\n", + "return data stored within the `reaction_network`.\n", + "\n", + "---\n", + "## Incremental Construction of Networks\n", + "The `@reaction_network` macro is monolithic, in that it not only constructs and\n", + "stores basic network properties such as the reaction stoichiometries, but also\n", + "generates **everything** needed to immediately solve ODE, SDE and jump models\n", + "using the network. This includes Jacobian functions, noise functions, and jump\n", + "functions for each reaction. 
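Concretely, a minimal sketch of that compact interface (placeholder values, not part of the tutorial's running example):

```julia
# Hypothetical sketch: a network from @reaction_network can be handed directly
# to the problem constructors, since the macro already generated everything needed.
# Placeholder initial condition and parameter values are used for illustration;
# order them as in speciesmap(rn) and paramsmap(rn) in real use.
u0    = ones(numspecies(rn))
pvals = ones(numparams(rn))
oprob = ODEProblem(rn, u0, (0.0, 1.0), pvals)
osol  = solve(oprob)
```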
While this allows for a compact interface to the\n", + "DifferentialEquations.jl solvers, it can also be computationally expensive for\n", + "large networks, where a user may only wish to solve one type of problem and/or\n", + "have fine-grained control over what is generated. In addition, some types of\n", + "reaction network structures are more amenable to being constructed\n", + "programmatically, as opposed to writing out all reactions by hand within one\n", + "macro. For these reasons DiffEqBiological provides two additional macros that\n", + "only *initially* setup basic reaction network properties, and which can be\n", + "extended through a programmatic interface: `@min_reaction_network` and\n", + "`@empty_reaction_network`. We now give an introduction to constructing these\n", + "more minimal network representations, and how they can be programmatically\n", + "extended. See also the relevant [API\n", + "section](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1).\n", + "\n", + "The `@min_reaction_network` macro works identically to the `@reaction_network`\n", + "macro, but the generated network will only be complete with respect to its\n", + "representation of chemical network properties (i.e. species, parameters and\n", + "reactions). No ODE, SDE or jump models are generated during the macro call. It\n", + "can subsequently be extended with the addition of new species, parameters or\n", + "reactions. The `@empty_reaction_network` allocates an empty network structure\n", + "that can also be extended using the programmatic interface. For example, consider\n", + "a partial version of the toggle-switch like network we defined above:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "rnmin = @min_reaction_network begin\n", + " (δ,γ), m₁ ↔ ∅\n", + " (δ,γ), m₂ ↔ ∅\n", + " β, m₁ --> m₁ + P₁\n", + " β, m₂ --> m₂ + P₂\n", + " μ, P₁ --> ∅\n", + " μ, P₂ --> ∅\n", + "end δ γ β μ;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we have left out the first two, and last three, reactions from the original\n", + "`reaction_network`. To expand the network until it is functionally equivalent to\n", + "the original model we add back in the missing species, parameters, and *finally*\n", + "the missing reactions. Note, it is required that species and parameters be\n", + "defined before any reactions using them are added. The necessary network\n", + "extension functions are given by `addspecies!`, `addparam!` and `addreaction!`,\n", + "and described in the\n", + "[API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). 
To complete `rnmin` we first add the relevant\n", + "species:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "addspecies!(rnmin, :D₁)\n", + "addspecies!(rnmin, :D₂)\n", + "addspecies!(rnmin, :T)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we add the needed parameters" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "addparam!(rnmin, :α)\n", + "addparam!(rnmin, :K)\n", + "addparam!(rnmin, :n)\n", + "addparam!(rnmin, :k₊)\n", + "addparam!(rnmin, :k₋)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note, both `addspecies!` and `addparam!` also accept strings encoding the\n", + "variable names (which are then converted to `Symbol`s internally).\n", + "\n", + "We are now ready to add the missing reactions. The API provides two forms of the\n", + "`addreaction!` function, one takes expressions analogous to what one would write\n", + "in the macro:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "addreaction!(rnmin, :(hillr(D₁,α,K,n)), :(∅ --> m₂))\n", + "addreaction!(rnmin, :((k₊,k₋)), :(2P₂ ↔ D₂))\n", + "addreaction!(rnmin, :k₊, :(2P₁ --> D₁))\n", + "addreaction!(rnmin, :k₋, :(D₁ --> 2P₁))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The rate can be an expression or symbol as above, but can also just be a\n", + "numeric value. The second form of `addreaction!` takes tuples of\n", + "`Pair{Symbol,Int}` that encode the stoichiometric coefficients of substrates and\n", + "reactants:" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "# signature is addreaction!(rnmin, paramexpr, substratestoich, productstoich)\n", + "addreaction!(rnmin, :(hillr(D₂,α,K,n)), (), (:m₁ => 1,))\n", + "addreaction!(rnmin, :k₊, (:P₁=>1, :P₂=>1), (:T=>1,))\n", + "addreaction!(rnmin, :k₋, (:T=>1,), (:P₁=>1, :P₂=>1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check that `rn` and `rnmin` have the same set of species:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "for i = 1:N\n (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,))\n (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,))\nend" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "0-element Array{Symbol,1}" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "setdiff(species(rn), species(rnmin))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "the same set of params:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "Let's first construct an ODE model for the network" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "0-element Array{Symbol,1}" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "setdiff(params(rn), params(rnmin))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "and the final reaction has the same substrates, reactions, and rate expression:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, 
+ "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "addodes!(rn)" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "0-element Array{Symbol,1}" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "rxidx = numreactions(rn)\n", + "setdiff(substrates(rn, rxidx), substrates(rnmin, rxidx))" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "We now need to specify the initial condition, parameter vector and time interval\nto solve on. We start with 10000 molecules placed at the center of the domain,\nand setup an `ODEProblem` to solve:" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "0-element Array{Symbol,1}" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "setdiff(products(rn, rxidx), products(rnmin, rxidx))" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "u₀ = zeros(N)\nu₀[div(N,2)] = 10000\np = [1/(h*h)]\ntspan = (0.,.01)\noprob = ODEProblem(rn, u₀, tspan, p)" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "true" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "rateexpr(rn, rxidx) == rateexpr(rnmin, rxidx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Extending Incrementally Generated Networks to Include ODEs, SDEs or Jumps\n", + "Once a network generated from `@min_reaction_network` or\n", + "`@empty_reaction_network` has had all the associated species, parameters and\n", + "reactions filled in, corresponding ODE, SDE or jump models can be constructed.\n", + "The relevant API functions are `addodes!`, `addsdes!` and `addjumps!`. One\n", + "benefit to contructing models with these functions is that they offer more\n", + "fine-grained control over what actually gets constructed. For example,\n", + "`addodes!` has the optional keyword argument, `build_jac`, which if set to\n", + "`false` will disable construction of symbolic Jacobians and functions for\n", + "evaluating Jacobians. For large networks this can give a significant speed-up in\n", + "the time required for constructing an ODE model. Each function and its\n", + "associated keyword arguments are described in the API section, [Functions to add\n", + "ODEs, SDEs or Jumps to a\n", + "Network](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1).\n", + "\n", + "Let's extend `rnmin` to include the needed functions for use in ODE\n", + "solvers:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "addodes!(rnmin)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The [Generated Functions for\n", + "Models](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Functions-for-Models-1)\n", + "section of the API shows what functions have been generated. For ODEs these\n", + "include `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)`\n", + "which evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. 
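A small sketch of calling this function directly (placeholder values, assuming `addodes!(rnmin)` has been run as above):

```julia
# Hypothetical sketch: evaluate the generated in-place RHS once at t = 0.
# Placeholder species and parameter values are used purely for illustration.
f!    = oderhsfun(rnmin)
u     = ones(numspecies(rnmin))
pvals = ones(numparams(rnmin))
du    = zeros(numspecies(rnmin))
f!(du, u, pvals, 0.0)     # du now holds the time derivatives of u at t = 0
```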
For\n", + "each generated function, the corresponding expressions from which it was\n", + "generated can be retrieved using accessors from the [Generated\n", + "Expressions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Expressions-1)\n", + "section of the API. The equations within `du` can be retrieved using the\n", + "`odeexprs(rnmin)` function. For example:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "We are now ready to solve the problem and plot the solution. Since we have\nessentially generated a method of lines discretization of the diffusion equation\nwith a discontinuous initial condition, we'll use an A-L stable implicit ODE\nsolver, `KenCarp4`, and plot the solution at a few times:" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "7-element Array{Union{Float64, Int64, Expr, Symbol},1}:\n", + " :((-(δ * m₁) + γ) + (α * K ^ n) / (K ^ n + D₂ ^ n)) \n", + " :((-(δ * m₂) + γ) + (α * K ^ n) / (K ^ n + D₁ ^ n)) \n", + " :(((((β * m₁ - μ * P₁) + -2 * (k₊ / 2) * P₁ ^ 2) + 2 * k₋ * D₁) - k₊ * P₁ * P₂) + k₋ * T)\n", + " :(((((β * m₂ - μ * P₂) + -2 * (k₊ / 2) * P₂ ^ 2) + 2 * k₋ * D₂) - k₊ * P₁ * P₂) + k₋ * T)\n", + " :((k₊ / 2) * P₁ ^ 2 - k₋ * D₁) \n", + " :((k₊ / 2) * P₂ ^ 2 - k₋ * D₂) \n", + " :(k₊ * P₁ * P₂ - k₋ * T) " + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "odeexprs(rnmin)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using Latexify we can see the ODEs themselves to compare with these expressions:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "sol = solve(oprob, KenCarp4())\ntimes = [0., .0001, .001, .01]\nplt = plot()\nfor time in times\n plot!(plt, 1:N, sol(time), fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", label=string(\"t = \", time), lw=3)\nend\nplot(plt, ylims=(0.,10000.))" + "data": { + "text/latex": [ + "\\begin{align}\n", + "\\frac{dm_1}{dt} =& - \\delta \\cdot m_1 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}} \\\\\n", + "\\frac{dm_2}{dt} =& - \\delta \\cdot m_2 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}} \\\\\n", + "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 -2 \\cdot \\frac{k_+}{2} \\cdot P_1^{2} + 2 \\cdot k_- \\cdot D_1 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", + "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 -2 \\cdot \\frac{k_+}{2} \\cdot P_2^{2} + 2 \\cdot k_- \\cdot D_2 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", + "\\frac{dD_1}{dt} =& \\frac{k_+}{2} \\cdot P_1^{2} - k_- \\cdot D_1 \\\\\n", + "\\frac{dD_2}{dt} =& \\frac{k_+}{2} \\cdot P_2^{2} - k_- \\cdot D_2 \\\\\n", + "\\frac{dT}{dt} =& k_+ \\cdot P_1 \\cdot P_2 - k_- \\cdot T \\\\\n", + "\\end{align}\n" ], - "metadata": {}, - "execution_count": null - }, + "text/plain": [ + "L\"\\begin{align}\n", + "\\frac{dm_1}{dt} =& - \\delta \\cdot m_1 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_2^{n}} \\\\\n", + "\\frac{dm_2}{dt} =& - \\delta \\cdot m_2 + \\gamma + \\frac{\\alpha \\cdot K^{n}}{K^{n} + D_1^{n}} \\\\\n", + "\\frac{dP_1}{dt} =& \\beta \\cdot m_1 - \\mu \\cdot P_1 -2 \\cdot \\frac{k_+}{2} \\cdot P_1^{2} + 2 \\cdot k_- \\cdot D_1 - k_+ \\cdot P_1 \\cdot P_2 + k_- \\cdot T \\\\\n", + "\\frac{dP_2}{dt} =& \\beta \\cdot m_2 - \\mu \\cdot P_2 -2 \\cdot \\frac{k_+}{2} \\cdot P_2^{2} + 2 \\cdot k_- \\cdot D_2 - k_+ \\cdot P_1 
\\cdot P_2 + k_- \\cdot T \\\\\n", + "\\frac{dD_1}{dt} =& \\frac{k_+}{2} \\cdot P_1^{2} - k_- \\cdot D_1 \\\\\n", + "\\frac{dD_2}{dt} =& \\frac{k_+}{2} \\cdot P_2^{2} - k_- \\cdot D_2 \\\\\n", + "\\frac{dT}{dt} =& k_+ \\cdot P_1 \\cdot P_2 - k_- \\cdot T \\\\\n", + "\\end{align}\n", + "\"" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "latexify(rnmin)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For ODEs two other functions are generated by `addodes!`. `jacfun(rnmin)` will\n", + "return the generated Jacobian evaluation function, `fjac(dJ,u,p,t)`, which given\n", + "the current solution `u` evaluates the Jacobian within `dJ`.\n", + "`jacobianexprs(rnmin)` gives the corresponding matrix of expressions, which can\n", + "be used with Latexify to see the Jacobian:" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "Here we see the characteristic diffusion of molecules from the center of the\ndomain, resulting in a shortening and widening of the solution as $t$ increases.\n\nLet's now look at a stochastic chemical kinetics jump process version of the\nmodel, where β gives the probability per time each molecule can hop from its\ncurrent lattice site to an individual neighboring site. We first add in the\njumps, disabling `regular_jumps` since they are not needed, and using the\n`minimal_jumps` flag to construct a minimal representation of the needed jumps.\nWe then construct a `JumpProblem`, and use the Composition-Rejection Direct\nmethod, `DirectCR`, to simulate the process of the molecules hopping about on\nthe lattice:" + "data": { + "text/latex": [ + "\\begin{equation}\n", + "\\left[\n", + "\\begin{array}{ccccccc}\n", + " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_2^{-1 + n}}{\\left( K^{n} + D_2^{n} \\right)^{2}} & 0 \\\\\n", + "0 & - \\delta & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_1^{-1 + n}}{\\left( K^{n} + D_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", + "\\beta & 0 & - \\mu - 2 \\cdot k_+ \\cdot P_1 - k_+ \\cdot P_2 & - k_+ \\cdot P_1 & 2 \\cdot k_- & 0 & k_{-} \\\\\n", + "0 & \\beta & - k_+ \\cdot P_2 & - \\mu - 2 \\cdot k_+ \\cdot P_2 - k_+ \\cdot P_1 & 0 & 2 \\cdot k_- & k_{-} \\\\\n", + "0 & 0 & k_+ \\cdot P_1 & 0 & - k_- & 0 & 0 \\\\\n", + "0 & 0 & 0 & k_+ \\cdot P_2 & 0 & - k_- & 0 \\\\\n", + "0 & 0 & k_+ \\cdot P_2 & k_+ \\cdot P_1 & 0 & 0 & - k_- \\\\\n", + "\\end{array}\n", + "\\right]\n", + "\\end{equation}\n" ], - "metadata": {} - }, + "text/plain": [ + "L\"\\begin{equation}\n", + "\\left[\n", + "\\begin{array}{ccccccc}\n", + " - \\delta & 0 & 0 & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_2^{-1 + n}}{\\left( K^{n} + D_2^{n} \\right)^{2}} & 0 \\\\\n", + "0 & - \\delta & 0 & 0 & \\frac{ - K^{n} \\cdot n \\cdot \\alpha \\cdot D_1^{-1 + n}}{\\left( K^{n} + D_1^{n} \\right)^{2}} & 0 & 0 \\\\\n", + "\\beta & 0 & - \\mu - 2 \\cdot k_+ \\cdot P_1 - k_+ \\cdot P_2 & - k_+ \\cdot P_1 & 2 \\cdot k_- & 0 & k_{-} \\\\\n", + "0 & \\beta & - k_+ \\cdot P_2 & - \\mu - 2 \\cdot k_+ \\cdot P_2 - k_+ \\cdot P_1 & 0 & 2 \\cdot k_- & k_{-} \\\\\n", + "0 & 0 & k_+ \\cdot P_1 & 0 & - k_- & 0 & 0 \\\\\n", + "0 & 0 & 0 & k_+ \\cdot P_2 & 0 & - k_- & 0 \\\\\n", + "0 & 0 & k_+ \\cdot P_2 & k_+ \\cdot P_1 & 0 & 0 & - k_- \\\\\n", + "\\end{array}\n", + "\\right]\n", + "\\end{equation}\n", + "\"" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": 
"execute_result" + } + ], + "source": [ + "latexify(jacobianexprs(rnmin))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`addodes!` also generates a function that evaluates the Jacobian of the ODE\n", + "derivative functions with respect to the parameters. `paramjacfun(rnmin)` then\n", + "returns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which\n", + "given the current solution `u` evaluates the Jacobian matrix with respect to\n", + "parameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an\n", + "[`ODEFunction`](http://docs.juliadiffeq.org/latest/features/performance_overloads.html)\n", + "representation of the ODEs is available from `odefun(rnmin)`. \n", + "\n", + "`addsdes!` and `addjumps!` work similarly to complete the network for use in\n", + "StochasticDiffEq and DiffEqJump solvers. \n", + "\n", + "#### Note on Using Generated Function and Expression API Functions\n", + "The generated functions and expressions accessible through the API require first\n", + "calling the appropriate `addodes!`, `addsdes` or `addjumps` function. These are\n", + "responsible for actually constructing the underlying functions and expressions.\n", + "The API accessors simply return already constructed functions and expressions\n", + "that are stored within the `reaction_network` structure.\n", + "\n", + "---\n", + "## Example of Generating a Network Programmatically\n", + "For a user directly typing in a reaction network, it is generally easier to use\n", + "the `@min_reaction_network` or `@reaction_network` macros to fully specify\n", + "reactions. However, for large, structured networks it can be much easier to\n", + "generate the network programmatically. For very large networks, with tens of\n", + "thousands of reactions, the form of `addreaction!` that uses stoichiometric\n", + "coefficients should be preferred as it offers substantially better performance.\n", + "To put together everything we've seen, let's generate the network corresponding\n", + "to a 1D continuous time random walk, approximating the diffusion of molecules\n", + "within an interval.\n", + "\n", + "The basic \"reaction\" network we wish to study is \n", + "\n", + "$$\n", + "u_1 \\leftrightarrows u_2 \\leftrightarrows u_3 \\cdots \\leftrightarrows u_{N}\n", + "$$\n", + "\n", + "for $N$ lattice sites on $[0,1]$. For $h = 1/N$ the lattice spacing, we'll\n", + "assume the rate molecules hop from their current site to any particular neighbor\n", + "is just $h^{-2}$. We can interpret this hopping process as a collection of\n", + "$2N-2$ \"reactions\", with the form $u_i \\to u_j$ for $j=i+1$ or $j=i-1$. We construct\n", + "the corresponding reaction network as follows. 
First we set values for the basic\n", + "parameters:" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "addjumps!(rn, build_regular_jumps=false, minimal_jumps=true)\n\n# make the initial condition integer valued \nu₀ = zeros(Int, N)\nu₀[div(N,2)] = 10000\n\n# setup and solve the problem\ndprob = DiscreteProblem(rn, u₀, tspan, p)\njprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))\njsol = solve(jprob, SSAStepper(), saveat=times)" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "text/plain": [ + "0.015625" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "N = 64\n", + "h = 1 / N" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "then we create an empty network, and add each species" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "rn = @empty_reaction_network\n", + "\n", + "for i = 1:N\n", + " addspecies!(rn, Symbol(:u, i))\n", + "end" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We next add one parameter `β`, which we will set equal to the hopping rate \n", + "of molecules, $h^{-2}$:" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "addparam!(rn, :β)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we add in the $2N-2$ possible hopping reactions:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "for i = 1:N\n", + " (i < N) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i+1)=>1,))\n", + " (i > 1) && addreaction!(rn, :β, (Symbol(:u,i)=>1,), (Symbol(:u,i-1)=>1,))\n", + "end" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's first construct an ODE model for the network" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "addodes!(rn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now need to specify the initial condition, parameter vector and time interval\n", + "to solve on. We start with 10000 molecules placed at the center of the domain,\n", + "and setup an `ODEProblem` to solve:" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "We can now plot bar graphs showing the locations of the molecules at the same\nset of times we examined the ODE solution. For comparison, we also plot the\ncorresponding ODE solutions (red lines) that we found:" - ], - "metadata": {} - }, + "data": { + "text/plain": [ + "\u001b[36mODEProblem\u001b[0m with uType \u001b[36mArray{Float64,1}\u001b[0m and tType \u001b[36mFloat64\u001b[0m. In-place: \u001b[36mtrue\u001b[0m\n", + "timespan: (0.0, 0.01)\n", + "u0: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 … 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "u₀ = zeros(N)\n", + "u₀[div(N,2)] = 10000\n", + "p = [1/(h*h)]\n", + "tspan = (0.,.01)\n", + "oprob = ODEProblem(rn, u₀, tspan, p)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We are now ready to solve the problem and plot the solution. 
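Before solving, an optional sanity check (a sketch added here, not part of the original tutorial) confirms the programmatic construction produced the expected counts:

```julia
# Optional check (hypothetical): the 1D walk should have N species and
# 2N-2 hopping reactions after the loops above.
@assert numspecies(rn) == N
@assert numreactions(rn) == 2N - 2
```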
Since we have\n", + "essentially generated a method of lines discretization of the diffusion equation\n", + "with a discontinuous initial condition, we'll use an A-L stable implicit ODE\n", + "solver, `Rodas5`, and plot the solution at a few times:" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ { - "outputs": [], - "cell_type": "code", - "source": [ - "times = [0., .0001, .001, .01]\nplts = []\nfor i = 1:4\n b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", title=string(\"t = \", times[i]))\n plot!(b,sol(times[i]))\n push!(plts,b)\nend\nplot(plts...)" - ], - "metadata": {}, - "execution_count": null - }, + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sol = solve(oprob, Rodas5())\n", + "times = [0., .0001, .001, .01]\n", + "plt = plot()\n", + "for time in times\n", + " plot!(plt, 1:N, sol(time), fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", label=string(\"t = \", time), lw=3)\n", + "end\n", + "plot(plt, ylims=(0.,10000.))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we see the characteristic diffusion of molecules from the center of the\n", + "domain, resulting in a shortening and widening of the solution as $t$ increases.\n", + "\n", + "Let's now look at a stochastic chemical kinetics jump process version of the\n", + "model, where β gives the probability per time each molecule can hop from its\n", + "current lattice site to an individual neighboring site. We first add in the\n", + "jumps, disabling `regular_jumps` since they are not needed, and using the\n", + "`minimal_jumps` flag to construct a minimal representation of the needed jumps.\n", + "We then construct a `JumpProblem`, and use the Composition-Rejection Direct\n", + "method, `DirectCR`, to simulate the process of the molecules hopping about on\n", + "the lattice:" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "source": [ - "Similar to the ODE solutions, we see that the molecules spread out and become\nmore and more well-mixed throughout the domain as $t$ increases. The simulation\nresults are noisy due to the finite numbers of molecules present in the\nstochsatic simulation, but since the number of molecules is large they agree\nwell with the ODE solution at each time.\n\n---\n## Getting Help\nHave a question related to DiffEqBiological or this tutorial? Feel free to ask\nin the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\nIf you think you've found a bug in DiffEqBiological, or would like to\nrequest/discuss new functionality, feel free to open an issue on\n[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\nthere is no related issue already open). If you've found a bug in this tutorial,\nor have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\nsite](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). 
Or, submit a pull\nrequest to DiffEqTutorials updating the tutorial!\n\n---" - ], - "metadata": {} + "data": { + "text/plain": [ + "retcode: Default\n", + "Interpolation: Piecewise constant interpolation\n", + "t: 4-element Array{Float64,1}:\n", + " 0.0 \n", + " 0.0001\n", + " 0.001 \n", + " 0.01 \n", + "u: 4-element Array{Array{Int64,1},1}:\n", + " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", + " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", + " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0 … 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] \n", + " [0, 2, 4, 3, 4, 2, 12, 15, 24, 26 … 25, 16, 11, 7, 7, 4, 1, 1, 3, 1]" + ] + }, + "execution_count": 34, + "metadata": {}, + "output_type": "execute_result" } - ], - "nbformat_minor": 2, - "metadata": { - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.1.1" - }, - "kernelspec": { - "name": "julia-1.1", - "display_name": "Julia 1.1.1", - "language": "julia" + ], + "source": [ + "addjumps!(rn, build_regular_jumps=false, minimal_jumps=true)\n", + "\n", + "# make the initial condition integer valued \n", + "u₀ = zeros(Int, N)\n", + "u₀[div(N,2)] = 10000\n", + "\n", + "# setup and solve the problem\n", + "dprob = DiscreteProblem(rn, u₀, tspan, p)\n", + "jprob = JumpProblem(dprob, DirectCR(), rn, save_positions=(false,false))\n", + "jsol = solve(jprob, SSAStepper(), saveat=times)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now plot bar graphs showing the locations of the molecules at the same\n", + "set of times we examined the ODE solution. For comparison, we also plot the\n", + "corresponding ODE solutions (red lines) that we found:" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ "\n" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" } + ], + "source": [ + "times = [0., .0001, .001, .01]\n", + "plts = []\n", + "for i = 1:4\n", + " b = bar(1:N, jsol[i], legend=false, fmt=fmt, xlabel=\"i\", ylabel=\"uᵢ\", title=string(\"t = \", times[i]))\n", + " plot!(b,sol(times[i]))\n", + " push!(plts,b)\n", + "end\n", + "plot(plts...)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similar to the ODE solutions, we see that the molecules spread out and become\n", + "more and more well-mixed throughout the domain as $t$ increases. The simulation\n", + "results are noisy due to the finite number of molecules present in the\n", + "stochastic simulation, but since the number of molecules is large they agree\n", + "well with the ODE solution at each time.\n", + "\n", + "---\n", + "## Getting Help\n", + "Have a question related to DiffEqBiological or this tutorial? Feel free to ask\n", + "in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\n", + "If you think you've found a bug in DiffEqBiological, or would like to\n", + "request/discuss new functionality, feel free to open an issue on\n", + "[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\n", + "there is no related issue already open). If you've found a bug in this tutorial,\n", + "or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\n", + "site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull\n", + "request to DiffEqTutorials updating the tutorial!\n", + "\n", + "---" + ] + } + ], + "metadata": { + "@webio": { + "lastCommId": null, + "lastKernelId": null + }, + "kernelspec": { + "display_name": "Julia 1.1.1", + "language": "julia", + "name": "julia-1.1" }, - "nbformat": 4 + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.1.1" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/notebook/models/04b-diffeqbio_III_steadystates.ipynb b/notebook/models/04b-diffeqbio_III_steadystates.ipynb new file mode 100644 index 00000000..b2e4abe1 --- /dev/null +++ b/notebook/models/04b-diffeqbio_III_steadystates.ipynb @@ -0,0 +1,259 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# DiffEqBiological Tutorial III: Steady-States and Bifurcations\n### Torkel Loman and Samuel Isaacson\n\nSeveral types of steady state analysis can be performed for networks defined\nwith DiffEqBiological by utilizing homotopy continuation.
This allows for\nfinding the steady states and bifurcations within a large class of systems. In\nthis tutorial we'll go through several examples of using this functionality.\n\nWe start by loading the necessary packages:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using DiffEqBiological, Plots\ngr(); default(fmt = :png);" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Steady states and stability of a biochemical reaction network.\nBistable switches are well known biological motifs, characterised by the\npresence of two different stable steady states." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bistable_switch = @reaction_network begin\n d, (X,Y) → ∅\n hillR(Y,v1,K1,n1), ∅ → X\n hillR(X,v2,K2,n2), ∅ → Y\nend d v1 K1 n1 v2 K2 n2\nd = 0.01;\nv1 = 1.5; K1 = 30; n1 = 3;\nv2 = 1.; K2 = 30; n2 = 3;\nbistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2];" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The steady states can be found using the `steady_states` function (which takes a reaction network and a set of parameter values as input). The stability of these steady states can be found using the `stability` function." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "ss = steady_states(bistable_switch, bistable_switch_p)" + ], + "metadata": {}, + "execution_count": null + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "stability(ss,bistable_switch, bistable_switch_p)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Since the equilibration methodology is based on homotopy continuation, it is not\nable to handle systems with non-integer exponents, or non polynomial reaction\nrates. Neither of the following two systems will work.\n\nThis system contains a non-integer exponent:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "rn1 = @reaction_network begin\n p, ∅ → X\n hill(X,v,K,n), X → ∅\nend p v K n\np1 = [1.,2.5,1.5,1.5]\nsteady_states(rn1,p1)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "This system contains a logarithmic reaction rate:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "rn2 = @reaction_network begin\n p, ∅ → X\n log(X), X → ∅\nend p\np2 = [1.]\nsteady_states(rn2,p2)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Bifurcation diagrams for biochemical reaction networks\nBifurcation diagrams illustrate how the steady states of a system depend on one\nor more parameters. They can be computed with the `bifurcations` function. 
It\ntakes the same arguments as `steady_states`, with the addition of the parameter\none wants to vary, and an interval over which to vary it:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.))\nplot(bif,ylabel=\"[X]\",label=\"\")\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The values for the second variable in the system can also be displayed, by\ngiving that as an additional input to `plot` (it is the second argument, directly\nafter the bifurcation diagram object):" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "plot(bif,2,ylabel=\"[Y]\")\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The `plot` function also accepts all other arguments which the Plots.jl `plot` function accepts." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.))\nplot(bif,linewidth=1.,title=\"A bifurcation diagram\",ylabel=\"Steady State concentration\")\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Certain parameters, like `n1`, cannot be sensibly varied over a continuous\ninterval. Instead, a discrete bifurcation diagram can be calculated with the\n`bifurcation_grid` function. Instead of an interval, the last argument is a\nrange of numbers:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.)\nplot(bif)\nscatter!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Bifurcation diagrams over two dimensions\nIn addition to the bifurcation diagrams illustrated above, where only a single\nvariable is varied, it is also possible to investigate the steady state\nproperties of s system as two different parameters are varied. Due to the nature\nof the underlying bifurcation algorithm it is not possible to continuously vary\nboth parameters. Instead, a set of discrete values are selected for the first\nparameter, and a continuous interval for the second. Next, for each discrete\nvalue of the first parameter, a normal bifurcation diagram is created over the\ninterval given for the second parameter." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.))\nplot(bif)\nplot!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "In the single variable case we could use a `bifurcation_grid` to investigate the\nbehavior of a parameter which could only attain discrete values. In the same\nway, if we are interested in two parameters, both of which require integer\nvalues, we can use `bifrucation_grid_2d`. 
In our case, this is required if we\nwant to vary both the parameters `n1` and `n2`:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.)\nplot(bif)\nscatter!([[],[]],color=[:blue :red],label = [\"Stable\" \"Unstable\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### The Brusselator\nThe Brusselator is a well know reaction network, which may or may not oscillate,\ndepending on parameter values." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "brusselator = @reaction_network begin\n A, ∅ → X\n 1, 2X + Y → 3X\n B, X → Y\n 1, X → ∅\nend A B;\nA = 0.5; B = 4.;\nbrusselator_p = [A, B];" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The system has only one steady state, for $(X,Y)=(A,B/A)$ This fixed point\nbecomes unstable when $B > 1+A^2$, leading to oscillations. Bifurcation diagrams\ncan be used to determine the system's stability, and hence look for where oscillations might appear in the Brusselator:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5))\nplot(bif,2)\nplot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = [\"Stable Real\" \"Stable Complex\" \"Unstable Complex\" \"Unstable Real\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Here red and yellow colors label unstable steady-states, while blue and cyan\nlabel stable steady-states. (In addition, yellow and cyan correspond to points\nwhere at least one eigenvalue of the Jacobian is imaginary, while red and blue\ncorrespond to points with real-valued eigenvalues.)\n\nGiven `A=0.5`, the point at which the system should become unstable is `B=1.25`. We can confirm this in the bifurcation diagram.\n\nWe can also investigate the behavior when we vary both parameters of the system:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0))\nplot(bif)\nplot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = [\"Stable Real\" \"Stable Complex\" \"Unstable Complex\" \"Unstable Real\"])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "---\n## Getting Help\nHave a question related to DiffEqBiological or this tutorial? Feel free to ask\nin the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby).\nIf you think you've found a bug in DiffEqBiological, or would like to\nrequest/discuss new functionality, feel free to open an issue on\n[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check\nthere is no related issue already open). If you've found a bug in this tutorial,\nor have a suggestion, feel free to open an issue on the [DiffEqTutorials Github\nsite](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). 
Or, submit a pull\nrequest to DiffEqTutorials updating the tutorial!\n\n---" + ], + "metadata": {} + } + ], + "nbformat_minor": 2, + "metadata": { + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.2.0" + }, + "kernelspec": { + "name": "julia-1.2", + "display_name": "Julia 1.2.0", + "language": "julia" + } + }, + "nbformat": 4 +} diff --git a/pdf/advanced/02-advanced_ODE_solving.pdf b/pdf/advanced/02-advanced_ODE_solving.pdf new file mode 100644 index 00000000..502abfca Binary files /dev/null and b/pdf/advanced/02-advanced_ODE_solving.pdf differ diff --git a/pdf/models/03-diffeqbio_I_introduction.pdf b/pdf/models/03-diffeqbio_I_introduction.pdf index 9378ff9d..b510e10e 100644 Binary files a/pdf/models/03-diffeqbio_I_introduction.pdf and b/pdf/models/03-diffeqbio_I_introduction.pdf differ diff --git a/pdf/models/04-diffeqbio_II_networkproperties.pdf b/pdf/models/04-diffeqbio_II_networkproperties.pdf index 8e9f7058..cc71d1bc 100644 Binary files a/pdf/models/04-diffeqbio_II_networkproperties.pdf and b/pdf/models/04-diffeqbio_II_networkproperties.pdf differ diff --git a/script/advanced/02-advanced_ODE_solving.jl b/script/advanced/02-advanced_ODE_solving.jl new file mode 100644 index 00000000..2de4cc7d --- /dev/null +++ b/script/advanced/02-advanced_ODE_solving.jl @@ -0,0 +1,200 @@ + +ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ()) + + +using LinearAlgebra +LinearAlgebra.BLAS.set_num_threads(4) + + +using DifferentialEquations +function rober(du,u,p,t) + y₁,y₂,y₃ = u + k₁,k₂,k₃ = p + du[1] = -k₁*y₁+k₃*y₂*y₃ + du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ + du[3] = k₂*y₂^2 + nothing +end +prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) +sol = solve(prob,Rosenbrock23()) + +using Plots +plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) + + +using BenchmarkTools +@btime solve(prob) + + +function rober_jac(J,u,p,t) + y₁,y₂,y₃ = u + k₁,k₂,k₃ = p + J[1,1] = k₁ * -1 + J[2,1] = k₁ + J[3,1] = 0 + J[1,2] = y₃ * k₃ + J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1 + J[3,2] = y₂ * 2 * k₂ + J[1,3] = k₃ * y₂ + J[2,3] = k₃ * y₂ * -1 + J[3,3] = 0 + nothing +end +f = ODEFunction(rober, jac=rober_jac) +prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) + +@btime solve(prob_jac) + + +using ModelingToolkit +de = modelingtoolkitize(prob) +ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place + + +:((##MTIIPVar#376, u, p, t)->begin + #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =# + #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =# + let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3]) + ##MTIIPVar#376[1] = α₁ * -1 + ##MTIIPVar#376[2] = α₁ + ##MTIIPVar#376[3] = 0 + ##MTIIPVar#376[4] = x₃ * α₃ + ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1 + ##MTIIPVar#376[6] = x₂ * 2 * α₂ + ##MTIIPVar#376[7] = α₃ * x₂ + ##MTIIPVar#376[8] = α₃ * x₂ * -1 + ##MTIIPVar#376[9] = 0 + end + #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =# + nothing + end) + + +jac = eval(ModelingToolkit.generate_jacobian(de...)[2]) +f = ODEFunction(rober, jac=jac) +prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) + + +I = [1,2,1,2,3,1,2] +J = [1,1,2,2,2,3,3] +using SparseArrays +jac_prototype = sparse(I,J,1.0) + + +f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype) +prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) + + +const N = 32 +const xyd_brusselator = range(0,stop=1,length=N) +brusselator_f(x, y, t) = (((x-0.3)^2 
+ (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5. +limit(a, N) = a == N+1 ? 1 : a == 0 ? N : a +function brusselator_2d_loop(du, u, p, t) + A, B, alpha, dx = p + alpha = alpha/dx^2 + @inbounds for I in CartesianIndices((N, N)) + i, j = Tuple(I) + x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] + ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N) + du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) + + B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) + du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) + + A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] + end +end +p = (3.4, 1., 10., step(xyd_brusselator)) + + +using SparsityDetection, SparseArrays +input = rand(32,32,2) +output = similar(input) +sparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0) +jac_sparsity = Float64.(sparse(sparsity_pattern)) + + +using Plots +spy(jac_sparsity,markersize=1,colorbar=false,color=:deep) + + +f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity) + + +function init_brusselator_2d(xyd) + N = length(xyd) + u = zeros(N, N, 2) + for I in CartesianIndices((N, N)) + x = xyd[I[1]] + y = xyd[I[2]] + u[I,1] = 22*(y*(1-y))^(3/2) + u[I,2] = 27*(x*(1-x))^(3/2) + end + u +end +u0 = init_brusselator_2d(xyd_brusselator) +prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop, + u0,(0.,11.5),p) + +prob_ode_brusselator_2d_sparse = ODEProblem(f, + u0,(0.,11.5),p) + + +@btime solve(prob_ode_brusselator_2d,save_everystep=false) +@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) + + +using SparseDiffTools +colorvec = matrix_colors(jac_sparsity) +@show maximum(colorvec) + + +f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity, + colorvec=colorvec) +prob_ode_brusselator_2d_sparse = ODEProblem(f, + init_brusselator_2d(xyd_brusselator), + (0.,11.5),p) +@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) + + +@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) +@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) + + +using DiffEqOperators +Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0) + + +f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv) +prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p) +@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) + + +using AlgebraicMultigrid +pc = aspreconditioner(ruge_stuben(jac_sparsity)) +@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false) + + +using Sundials +# Sparse Version +@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false) +# GMRES Version: Doesn't require any extra stuff! +@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) + + +using DifferentialEquations +function rober(du,u,p,t) + y₁,y₂,y₃ = u + k₁,k₂,k₃ = p + du[1] = -k₁*y₁+k₃*y₂*y₃ + du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ + du[3] = y₁ + y₂ + y₃ - 1 + nothing +end +M = [1. 0 0 + 0 1. 
0 + 0 0 0] +f = ODEFunction(rober,mass_matrix=M) +prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) +sol = solve(prob_mm,Rodas5()) + +plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) + diff --git a/script/models/03-diffeqbio_I_introduction.jl b/script/models/03-diffeqbio_I_introduction.jl index 15c99ef6..2500809d 100644 --- a/script/models/03-diffeqbio_I_introduction.jl +++ b/script/models/03-diffeqbio_I_introduction.jl @@ -25,14 +25,15 @@ end α K n δ γ β μ; latexify(repressilator; env=:chemical) -x = latexify(repressilator; env=:chemical, starred=true, mathjax=true); +mathjax = WEAVE_ARGS[:doctype] == "pdf" ? false : true +x = latexify(repressilator; env=:chemical, starred=true, mathjax=mathjax); display("text/latex", "$x"); -latexify(repressilator) +latexify(repressilator, cdot=false) -x = latexify(repressilator, starred=true); +x = latexify(repressilator, cdot=false, starred=true); display("text/latex", "$x"); @@ -89,6 +90,13 @@ u₀ = [5.] tspan = (0.,4.); +latexify(bdp, noise=true, cdot=false) + + +x = latexify(bdp, noise=true, cdot=false, starred=true); +display("text/latex", "$x"); + + # SDEProblem for CLE sprob = SDEProblem(bdp, u₀, tspan, p) @@ -98,10 +106,10 @@ sol = solve(sprob, tstops=range(0., step=4e-3, length=1001)) plot(sol, fmt=:svg) -latexify(jacobianexprs(repressilator)) +latexify(jacobianexprs(repressilator), cdot=false) -x = latexify(jacobianexprs(repressilator), starred=true); +x = latexify(jacobianexprs(repressilator), cdot=false, starred=true); display("text/latex", "$x"); diff --git a/script/models/04-diffeqbio_II_networkproperties.jl b/script/models/04-diffeqbio_II_networkproperties.jl index 5c9a0d2f..29fe117b 100644 --- a/script/models/04-diffeqbio_II_networkproperties.jl +++ b/script/models/04-diffeqbio_II_networkproperties.jl @@ -154,7 +154,7 @@ tspan = (0.,.01) oprob = ODEProblem(rn, u₀, tspan, p) -sol = solve(oprob, KenCarp4()) +sol = solve(oprob, Rodas5()) times = [0., .0001, .001, .01] plt = plot() for time in times diff --git a/script/models/04b-diffeqbio_III_steadystates.jl b/script/models/04b-diffeqbio_III_steadystates.jl new file mode 100644 index 00000000..f01b21ff --- /dev/null +++ b/script/models/04b-diffeqbio_III_steadystates.jl @@ -0,0 +1,90 @@ + +using DiffEqBiological, Plots +gr(); default(fmt = :png); + + +bistable_switch = @reaction_network begin + d, (X,Y) → ∅ + hillR(Y,v1,K1,n1), ∅ → X + hillR(X,v2,K2,n2), ∅ → Y +end d v1 K1 n1 v2 K2 n2 +d = 0.01; +v1 = 1.5; K1 = 30; n1 = 3; +v2 = 1.; K2 = 30; n2 = 3; +bistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2]; + + +ss = steady_states(bistable_switch, bistable_switch_p) + + +stability(ss,bistable_switch, bistable_switch_p) + + +rn1 = @reaction_network begin + p, ∅ → X + hill(X,v,K,n), X → ∅ +end p v K n +p1 = [1.,2.5,1.5,1.5] +steady_states(rn1,p1) + + +rn2 = @reaction_network begin + p, ∅ → X + log(X), X → ∅ +end p +p2 = [1.] +steady_states(rn2,p2) + + +bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.)) +plot(bif,ylabel="[X]",label="") +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) + + +plot(bif,2,ylabel="[Y]") +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) + + +bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.)) +plot(bif,linewidth=1.,title="A bifurcation diagram",ylabel="Steady State concentration") +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) + + +bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.) 
+plot(bif) +scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) + + +bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.)) +plot(bif) +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) + + +bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.) +plot(bif) +scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) + + +brusselator = @reaction_network begin + A, ∅ → X + 1, 2X + Y → 3X + B, X → Y + 1, X → ∅ +end A B; +A = 0.5; B = 4.; +brusselator_p = [A, B]; + + +bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5)) +plot(bif,2) +plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) + + +bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0)) +plot(bif) +plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) + + +using DiffEqTutorials +DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) + diff --git a/src/DiffEqTutorials.jl b/src/DiffEqTutorials.jl index ab8261e5..1407915d 100644 --- a/src/DiffEqTutorials.jl +++ b/src/DiffEqTutorials.jl @@ -13,30 +13,35 @@ function weave_file(folder,file,build_list=(:script,:html,:pdf,:notebook); kwarg println("Building Script") dir = joinpath(repo_directory,"script",folder) isdir(dir) || mkdir(dir) + args[:doctype] = "script" tangle(tmp;out_path=dir) end if :html ∈ build_list println("Building HTML") dir = joinpath(repo_directory,"html",folder) isdir(dir) || mkdir(dir) - weave(tmp,doctype = "md2html",out_path=dir,args=args; css=cssfile, kwargs...) + args[:doctype] = "html" + weave(tmp,doctype = "md2html",out_path=dir,args=args; fig_ext=".svg", css=cssfile, kwargs...) end if :pdf ∈ build_list println("Building PDF") dir = joinpath(repo_directory,"pdf",folder) isdir(dir) || mkdir(dir) + args[:doctype] = "pdf" weave(tmp,doctype="md2pdf",out_path=dir,args=args; template=latexfile, kwargs...) end if :github ∈ build_list println("Building Github Markdown") dir = joinpath(repo_directory,"markdown",folder) isdir(dir) || mkdir(dir) + args[:doctype] = "github" weave(tmp,doctype = "github",out_path=dir,args=args; kwargs...) end if :notebook ∈ build_list println("Building Notebook") dir = joinpath(repo_directory,"notebook",folder) isdir(dir) || mkdir(dir) + args[:doctype] = "notebook" Weave.convert_doc(tmp,joinpath(dir,file[1:end-4]*".ipynb")) end end @@ -94,7 +99,7 @@ function tutorial_footer(folder=nothing, file=nothing; remove_homedir=true) md *= "```\nStatus `$(projfile)`\n" for pkg in pkgs - if pkg.old.ver != nothing + if !isnothing(pkg.old) && pkg.old.ver !== nothing md *= "[$(string(pkg.uuid))] $(string(pkg.name)) $(string(pkg.old.ver))\n" else md *= "[$(string(pkg.uuid))] $(string(pkg.name))\n" diff --git a/tutorials/advanced/01-beeler_reuter.jmd b/tutorials/advanced/01-beeler_reuter.jmd index c07b64ab..59c4ab01 100644 --- a/tutorials/advanced/01-beeler_reuter.jmd +++ b/tutorials/advanced/01-beeler_reuter.jmd @@ -7,7 +7,7 @@ author: Shahriar Iravanian [JuliaDiffEq](https://github.com/JuliaDiffEq) is a suite of optimized Julia libraries to solve ordinary differential equations (ODE). *JuliaDiffEq* provides a large number of explicit and implicit solvers suited for different types of ODE problems. 
It is possible to reduce a system of partial differential equations into an ODE problem by employing the [method of lines (MOL)](https://en.wikipedia.org/wiki/Method_of_lines). The essence of MOL is to discretize the spatial derivatives (by finite difference, finite volume or finite element methods) into algebraic equations and to keep the time derivatives as is. The resulting differential equations are left with only one independent variable (time) and can be solved with an ODE solver. [Solving Systems of Stochastic PDEs and using GPUs in Julia](http://www.stochasticlifestyle.com/solving-systems-stochastic-pdes-using-gpus-julia/) is a brief introduction to MOL and using GPUs to accelerate PDE solving in *JuliaDiffEq*. Here we expand on this introduction by developing an implicit/explicit (IMEX) solver for a 2D cardiac electrophysiology model and show how to use [CuArray](https://github.com/JuliaGPU/CuArrays.jl) and [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl) libraries to run the explicit part of the model on a GPU. -Note that this tutorial does not use the [higher order IMEX methods built into DifferentialEquations.jl](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1) but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios. +Note that this tutorial does not use the [higher order IMEX methods built into DifferentialEquations.jl](http://docs.juliadiffeq.org/dev/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1) but instead shows how to hand-split an equation when the explicit portion has an analytical solution (or approxiate), which is common in many scenarios. There are hundreds of ionic models that describe cardiac electrical activity in various degrees of detail. Most are based on the classic [Hodgkin-Huxley model](https://en.wikipedia.org/wiki/Hodgkin%E2%80%93Huxley_model) and define the time-evolution of different state variables in the form of nonlinear first-order ODEs. The state vector for these models includes the transmembrane potential, gating variables, and ionic concentrations. The coupling between cells is through the transmembrame potential only and is described as a reaction-diffusion equation, which is a parabolic PDE, @@ -27,7 +27,7 @@ We have chosen the [Beeler-Reuter ventricular ionic model](https://www.ncbi.nlm. ## CPU-Only Beeler-Reuter Solver -Let's start by developing a CPU only IMEX solver. The main idea is to use the *DifferentialEquations* framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. If no analytical approximation was known for the explicit part, one could use methods from [this list](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1). +Let's start by developing a CPU only IMEX solver. The main idea is to use the *DifferentialEquations* framework to handle the implicit part of the equation and code the analytical approximation for explicit part separately. If no analytical approximation was known for the explicit part, one could use methods from [this list](http://docs.juliadiffeq.org/dev/solvers/split_ode_solve.html#Implicit-Explicit-(IMEX)-ODE-1). First, we define the model constants: @@ -129,7 +129,7 @@ end ### The Rush-Larsen Method -We use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. 
The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the [IMEX solvers documentation](http://docs.juliadiffeq.org/latest/solvers/split_ode_solve.html#Implicit-Explicit-%28IMEX%29-ODE-1). While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest. +We use an explicit solver for all the state variables except for the transmembrane potential which is solved with the help of an implicit solver. The explicit solver is a domain-specific exponential method, the Rush-Larsen method. This method utilizes an approximation on the model in order to transform the IMEX equation into a form suitable for an implicit ODE solver. This combination of implicit and explicit methods forms a specialized IMEX solver. For general IMEX integration, please see the [IMEX solvers documentation](http://docs.juliadiffeq.org/dev/solvers/split_ode_solve.html#Implicit-Explicit-%28IMEX%29-ODE-1). While we could have used the general model to solve the current problem, for this specific model, the transformation approach is more efficient and is of practical interest. The [Rush-Larsen](https://ieeexplore.ieee.org/document/4122859/) method replaces the explicit Euler integration for the gating variables with direct integration. The starting point is the general ODE for the gating variables in Hodgkin-Huxley style ODEs, diff --git a/tutorials/advanced/02-advanced_ODE_solving.jmd b/tutorials/advanced/02-advanced_ODE_solving.jmd new file mode 100644 index 00000000..44090834 --- /dev/null +++ b/tutorials/advanced/02-advanced_ODE_solving.jmd @@ -0,0 +1,506 @@ +--- +title: Solving Stiff Equations +author: Chris Rackauckas +--- + +This tutorial is for getting into the extra features for solving stiff ordinary +differential equations in an efficient manner. Solving stiff ordinary +differential equations requires specializing the linear solver on properties of +the Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2) +back-solves. Note that these same functions and controls also extend to stiff +SDEs, DDEs, DAEs, etc. + +## Code Optimization for Differential Equations + +### Writing Efficient Code + +For a detailed tutorial on how to optimize one's DifferentialEquations.jl code, +please see the +[Optimizing DiffEq Code tutorial](http://tutorials.juliadiffeq.org/html/introduction/03-optimizing_diffeq_code.html). + +### Choosing a Good Solver + +Choosing a good solver is required for getting top notch speed. General +recommendations can be found on the solver page (for example, the +[ODE Solver Recommendations](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html)). +The current recommendations can be simplified to a Rosenbrock method +(`Rosenbrock23` or `Rodas5`) for smaller (<50 ODEs) problems, ESDIRK methods +for slightly larger (`TRBDF2` or `KenCarp4` for <2000 ODEs), and Sundials +`CVODE_BDF` for even larger problems. `lsoda` from +[LSODA.jl](https://github.com/rveltz/LSODA.jl) is generally worth a try. + +More details on the solver to choose can be found by benchmarking. See the +[DiffEqBenchmarks](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) to +compare many solvers on many problems. 
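+
+As a concrete illustration of what such a benchmark-driven comparison can look
+like, here is a minimal sketch (the stiff van der Pol oscillator and the
+particular solver list are only illustrative assumptions, and timings depend on
+hardware and tolerances) that times a few of the recommended stiff methods on
+the same small problem:
+
+```julia
+using DifferentialEquations, Sundials, BenchmarkTools
+
+# Stiff van der Pol oscillator: a small, classic stiff test problem
+function vdp!(du, u, p, t)
+    μ = p[1]
+    du[1] = u[2]
+    du[2] = μ*((1 - u[1]^2)*u[2] - u[1])
+    nothing
+end
+prob_vdp = ODEProblem(vdp!, [2.0, 0.0], (0.0, 6.3), [1e6])
+
+# Time a Rosenbrock method, an ESDIRK method, and Sundials' BDF
+@btime solve(prob_vdp, Rosenbrock23(), save_everystep=false)
+@btime solve(prob_vdp, TRBDF2(), save_everystep=false)
+@btime solve(prob_vdp, CVODE_BDF(), save_everystep=false)
+```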
+ +### Check Out the Speed FAQ + +See [this FAQ](http://docs.juliadiffeq.org/dev/basics/faq.html#Performance-1) +for information on common pitfalls and how to improve performance. + +### Setting Up Your Julia Installation for Speed + +Julia uses an underlying BLAS implementation for its matrix multiplications +and factorizations. This library is automatically multithreaded and accelerates +the internal linear algebra of DifferentialEquations.jl. However, for optimality, +you should make sure that the number of BLAS threads that you are using matches +the number of physical cores and not the number of logical cores. See +[this issue for more details](https://github.com/JuliaLang/julia/issues/33409). + +To check the number of BLAS threads, use: + +```julia +ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ()) +``` + +If I want to set this directly to 4 threads, I would use: + +```julia +using LinearAlgebra +LinearAlgebra.BLAS.set_num_threads(4) +``` + +Additionally, in some cases Intel's MKL might be a faster BLAS than the standard +BLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you +can use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) which will accelerate +the linear algebra routines. Please see the package for the limitations. + +### Use Accelerator Hardware + +When possible, use GPUs. If your ODE system is small and you need to solve it +with very many different parameters, see the +[ensembles interface](http://docs.juliadiffeq.org/dev/features/ensemble.html) +and [DiffEqGPU.jl](https://github.com/JuliaDiffEq/DiffEqGPU.jl). If your problem +is large, consider using a [CuArray](https://github.com/JuliaGPU/CuArrays.jl) +for the state to allow for GPU-parallelism of the internal linear algebra. + +## Speeding Up Jacobian Calculations + +When one is using an implicit or semi-implicit differential equation solver, +the Jacobian must be built at many iterations and this can be one of the most +expensive steps. There are two pieces that must be optimized in order to reach +maximal efficiency when solving stiff equations: the sparsity pattern and the +construction of the Jacobian. The construction is filling the matrix +`J` with values, while the sparsity pattern is what `J` to use. + +The sparsity pattern is given by a prototype matrix, the `jac_prototype`, which +will be copied to be used as `J`. The default is for `J` to be a `Matrix`, +i.e. a dense matrix. However, if you know the sparsity of your problem, then +you can pass a different matrix type. For example, a `SparseMatrixCSC` will +give a sparse matrix. Additionally, structured matrix types like `Tridiagonal`, +`BandedMatrix` (from +[BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)), +`BlockBandedMatrix` (from +[BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)), +and more can be given. DifferentialEquations.jl will internally use this matrix +type, making the factorizations faster by utilizing the specialized forms. + +For the construction, there are 3 ways to fill `J`: + +- The default, which uses normal finite/automatic differentiation +- A function `jac(J,u,p,t)` which directly computes the values of `J` +- A `colorvec` which defines a sparse differentiation scheme. + +We will now showcase how to make use of this functionality with growing complexity. 
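+
+As a small preview of the `jac_prototype` idea, here is a minimal sketch
+(the 1D diffusion semi-discretization below is an assumed example, chosen only
+because its Jacobian is exactly tridiagonal) of passing a structured matrix
+type as the prototype:
+
+```julia
+using DifferentialEquations, LinearAlgebra
+
+# Method-of-lines discretization of 1D diffusion with zero Dirichlet boundaries:
+# du[i] only couples to u[i-1], u[i], u[i+1], so the Jacobian is tridiagonal.
+function heat_1d!(du, u, p, t)
+    D, dx = p
+    n = length(u)
+    du[1] = D*(u[2] - 2u[1])/dx^2
+    @inbounds for i in 2:n-1
+        du[i] = D*(u[i-1] - 2u[i] + u[i+1])/dx^2
+    end
+    du[n] = D*(u[n-1] - 2u[n])/dx^2
+    nothing
+end
+
+nheat = 64
+# Declare the structure: the solver can then store and factorize J as a Tridiagonal
+heat_jac_prototype = Tridiagonal(ones(nheat-1), ones(nheat), ones(nheat-1))
+fheat = ODEFunction(heat_1d!, jac_prototype=heat_jac_prototype)
+u0heat = sin.(range(0, stop=pi, length=nheat))
+prob_heat = ODEProblem(fheat, u0heat, (0.0, 1.0), (1.0, 1/(nheat+1)))
+sol_heat = solve(prob_heat, TRBDF2())
+```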
+ +### Declaring Jacobian Functions + +Let's solve the Robertson equations: + +$$\begin{align} +dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\ +dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ +dy_3 &= 3*10^7 y_{2}^2 \\ +\end{align}$$ + +In order to reduce the Jacobian construction cost, one can describe a Jacobian +function by using the `jac` argument for the `ODEFunction`. First, let's do +a standard `ODEProblem`: + +```julia +using DifferentialEquations +function rober(du,u,p,t) + y₁,y₂,y₃ = u + k₁,k₂,k₃ = p + du[1] = -k₁*y₁+k₃*y₂*y₃ + du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ + du[3] = k₂*y₂^2 + nothing +end +prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) +sol = solve(prob,Rosenbrock23()) + +using Plots +plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) +``` + +```julia +using BenchmarkTools +@btime solve(prob) +``` + +Now we want to add the Jacobian. First we have to derive the Jacobian +$\frac{df_i}{du_j}$ which is `J[i,j]`. From this we get: + +```julia +function rober_jac(J,u,p,t) + y₁,y₂,y₃ = u + k₁,k₂,k₃ = p + J[1,1] = k₁ * -1 + J[2,1] = k₁ + J[3,1] = 0 + J[1,2] = y₃ * k₃ + J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1 + J[3,2] = y₂ * 2 * k₂ + J[1,3] = k₃ * y₂ + J[2,3] = k₃ * y₂ * -1 + J[3,3] = 0 + nothing +end +f = ODEFunction(rober, jac=rober_jac) +prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) + +@btime solve(prob_jac) +``` + +### Automatic Derivation of Jacobian Functions + +But that was hard! If we want to take the symbolic Jacobian of numerical +code, we can make use of [ModelingToolkit.jl](https://github.com/JuliaDiffEq/ModelingToolkit.jl) +to symbolicify the numerical code and do the symbolic calculation and return +the Julia code for this. + +```julia +using ModelingToolkit +de = modelingtoolkitize(prob) +ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place +``` + +which outputs: + +```julia;eval=false +:((##MTIIPVar#376, u, p, t)->begin + #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =# + #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =# + let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3]) + ##MTIIPVar#376[1] = α₁ * -1 + ##MTIIPVar#376[2] = α₁ + ##MTIIPVar#376[3] = 0 + ##MTIIPVar#376[4] = x₃ * α₃ + ##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1 + ##MTIIPVar#376[6] = x₂ * 2 * α₂ + ##MTIIPVar#376[7] = α₃ * x₂ + ##MTIIPVar#376[8] = α₃ * x₂ * -1 + ##MTIIPVar#376[9] = 0 + end + #= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =# + nothing + end) +``` + +Now let's use that to define the analytical Jacobian: + +```julia +jac = eval(ModelingToolkit.generate_jacobian(de...)[2]) +f = ODEFunction(rober, jac=jac) +prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) +``` + +### Declaring a Sparse Jacobian + +Jacobian sparsity is declared by the `jac_prototype` argument in the `ODEFunction`. +Note that you should only do this if the sparsity is high, for example, 0.1% +of the matrix is non-zeros, otherwise the overhead of sparse matrices can be higher +than the gains from sparse differentiation! + +But as a demonstration, let's build a sparse matrix for the Rober problem.
We +can do this by gathering the `I` and `J` pairs for the non-zero components, like: + +```julia +I = [1,2,1,2,3,1,2] +J = [1,1,2,2,2,3,3] +using SparseArrays +jac_prototype = sparse(I,J,1.0) +``` + +Now this is the sparse matrix prototype that we want to use in our solver, which +we then pass like: + +```julia +f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype) +prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) +``` + +### Automatic Sparsity Detection + +One of the useful companion tools for DifferentialEquations.jl is +[SparsityDetection.jl](https://github.com/JuliaDiffEq/SparsityDetection.jl). +This allows for automatic declaration of Jacobian sparsity types. To see this +in action, let's look at the 2-dimensional Brusselator equation: + +```julia +const N = 32 +const xyd_brusselator = range(0,stop=1,length=N) +brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5. +limit(a, N) = a == N+1 ? 1 : a == 0 ? N : a +function brusselator_2d_loop(du, u, p, t) + A, B, alpha, dx = p + alpha = alpha/dx^2 + @inbounds for I in CartesianIndices((N, N)) + i, j = Tuple(I) + x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] + ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N) + du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) + + B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) + du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) + + A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] + end +end +p = (3.4, 1., 10., step(xyd_brusselator)) +``` + +Given this setup, we can give an example `input` and `output` and call `sparsity!` +on our function with the example arguments and it will kick out a sparse matrix +with our pattern that we can turn into our `jac_prototype`. + +```julia +using SparsityDetection, SparseArrays +input = rand(32,32,2) +output = similar(input) +sparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0) +jac_sparsity = Float64.(sparse(sparsity_pattern)) +``` + +Let's double check what our sparsity pattern looks like: + +```julia +using Plots +spy(jac_sparsity,markersize=1,colorbar=false,color=:deep) +``` + +That's neat, and would be tedious to build by hand! Now we just pass it to the +`ODEFunction` as before: + +```julia +f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity) +``` + +Build the `ODEProblem`: + +```julia +function init_brusselator_2d(xyd) + N = length(xyd) + u = zeros(N, N, 2) + for I in CartesianIndices((N, N)) + x = xyd[I[1]] + y = xyd[I[2]] + u[I,1] = 22*(y*(1-y))^(3/2) + u[I,2] = 27*(x*(1-x))^(3/2) + end + u +end +u0 = init_brusselator_2d(xyd_brusselator) +prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop, + u0,(0.,11.5),p) + +prob_ode_brusselator_2d_sparse = ODEProblem(f, + u0,(0.,11.5),p) +``` + +Now let's see how the version with sparsity compares to the version without: + +```julia +@btime solve(prob_ode_brusselator_2d,save_everystep=false) +@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) +``` + +### Declaring Color Vectors for Fast Construction + +If you cannot directly define a Jacobian function, you can use the `colorvec` +to speed up the Jacobian construction. What the `colorvec` does is allow for +calculating multiple columns of a Jacobian simultaneously by using the sparsity +pattern. An explanation of matrix coloring can be found in the +[MIT 18.337 Lecture Notes](https://mitmath.github.io/18337/lecture9/stiff_odes).
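+
+To build some intuition for why coloring helps before reaching for the tooling,
+here is a toy sketch (not tied to the Brusselator; the helper `ftri!` and the
+hard-coded 3-coloring are assumed purely for illustration): for a tridiagonal
+sparsity pattern, columns 1, 4, 7, ... never touch the same row, so a single
+perturbed function evaluation recovers all of them at once.
+
+```julia
+# A map whose Jacobian is tridiagonal (a 1D second-difference stencil)
+function ftri!(dx, x)
+    n = length(x)
+    dx[1] = 2x[1] - x[2]
+    for i in 2:n-1
+        dx[i] = -x[i-1] + 2x[i] - x[i+1]
+    end
+    dx[n] = -x[n-1] + 2x[n]
+    nothing
+end
+
+nc = 9
+xbase = rand(nc); fbase = zeros(nc); fpert = zeros(nc)
+ftri!(fbase, xbase)
+colors_tri = [mod1(i, 3) for i in 1:nc]   # 3 colors suffice for a tridiagonal pattern
+h = 1e-6
+compressed = zeros(nc, 3)
+for c in 1:3
+    xp = copy(xbase)
+    xp[colors_tri .== c] .+= h            # perturb every column of color c at once
+    ftri!(fpert, xp)
+    compressed[:, c] = (fpert .- fbase) ./ h
+end
+# Each row of `compressed` mixes contributions from columns of one color only,
+# and those columns have disjoint row supports, so the full 9x9 tridiagonal
+# Jacobian is recovered from 3 evaluations instead of 9.
+```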
+ +To perform general matrix coloring, we can use +[SparseDiffTools.jl](https://github.com/JuliaDiffEq/SparseDiffTools.jl). For +example, for the Brusselator equation: + +```julia +using SparseDiffTools +colorvec = matrix_colors(jac_sparsity) +@show maximum(colorvec) +``` + +This means that we can now calculate the Jacobian in 12 function calls. This is +a nice reduction from 2048 using only automated tooling! To now make use of this +inside of the ODE solver, you simply need to declare the colorvec: + +```julia +f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity, + colorvec=colorvec) +prob_ode_brusselator_2d_sparse = ODEProblem(f, + init_brusselator_2d(xyd_brusselator), + (0.,11.5),p) +@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false) +``` + +Notice the massive speed enhancement! + +## Defining Linear Solver Routines and Jacobian-Free Newton-Krylov + +A completely different way to optimize the linear solvers for large sparse +matrices is to use a Krylov subspace method. This requires changing the linear +solver to a Krylov method. Optionally, one can use a Jacobian-free +operator to reduce the memory requirements. + +### Declaring a Jacobian-Free Newton-Krylov Implementation + +To swap the linear solver out, we use the `linsolve` command and choose the +GMRES linear solver. + +```julia +@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) +@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) +``` + +For more information on linear solver choices, see the +[linear solver documentation](http://docs.juliadiffeq.org/dev/features/linear_nonlinear.html). + +On this problem, handling the sparsity correctly seemed to give much more of a +speedup than going to a Krylov approach, but that can be dependent on the problem +(and whether a good preconditioner is found). + +We can also enhance this by using a Jacobian-Free implementation of `f'(x)*v`. +To define the Jacobian-Free operator, we can use +[DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to generate +an operator `JacVecOperator` such that `Jv*v` performs `f'(x)*v` without building +the Jacobian matrix. + +```julia +using DiffEqOperators +Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0) +``` + +and then we can use this by making it our `jac_prototype`: + +```julia +f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv) +prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p) +@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false) +``` + +### Adding a Preconditioner + +The [linear solver documentation](http://docs.juliadiffeq.org/dev/features/linear_nonlinear.html#IterativeSolvers.jl-Based-Methods-1) +shows how you can add a preconditioner to the GMRES. For example, you can +use packages like [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) +to add an algebraic multigrid (AMG) or [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl) +for an incomplete LU-factorization (iLU). + +```julia +using AlgebraicMultigrid +pc = aspreconditioner(ruge_stuben(jac_sparsity)) +@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false) +``` + +## Using Structured Matrix Types + +If your sparsity pattern follows a specific structure, for example a banded +matrix, then you can declare `jac_prototype` to be of that structure and then +additional optimizations will come for free.
Note that in this case, it is +not necessary to provide a `colorvec` since the color vector will be analytically +derived from the structure of the matrix. + +The matrices which are allowed are those which satisfy the +[ArrayInterface.jl](https://github.com/JuliaDiffEq/ArrayInterface.jl) interface +for automatically-colorable matrices. These include: + +- Bidiagonal +- Tridiagonal +- SymTridiagonal +- BandedMatrix ([BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)) +- BlockBandedMatrix ([BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)) + +Matrices which do not satisfy this interface can still be used, but the matrix +coloring will not be automatic, and an appropriate linear solver may need to +be given (otherwise it will default to attempting an LU-decomposition). + +## Sundials-Specific Handling + +While much of the setup makes the transition to using Sundials automatic, there +are some differences between the pure Julia implementations and the Sundials +implementations which must be taken note of. These are all detailed in the +[Sundials solver documentation](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html#Sundials.jl-1), +but here we will highlight the main details. + +Defining a sparse matrix and a Jacobian for Sundials works just like any other +package. The core difference is in the choice of the linear solver. With Sundials, +the linear solver choice is done with a Symbol in the `linear_solver` argument from a +preset list. Particular choices of note are `:Band` for a banded matrix and +`:GMRES` for using GMRES. If you are using Sundials, `:GMRES` will not require +defining the JacVecOperator, and instead will always make use of a Jacobian-Free +Newton-Krylov (with numerical differentiation). Thus on this problem we could do: + +```julia +using Sundials +# Sparse Version +@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false) +# GMRES Version: Doesn't require any extra stuff! +@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false) +``` + +Details for setting up a preconditioner with Sundials can be found at the +[Sundials solver page](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html#Sundials.jl-1). + +## Handling Mass Matrices + +Instead of just defining an ODE as $u' = f(u,p,t)$, it can be common to express +the differential equation in the form with a mass matrix: + +$$Mu' = f(u,p,t)$$ + +where $M$ is known as the mass matrix. Let's solve the Robertson equation. +At the top we wrote this equation as: + +$$\begin{align} +dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\ +dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ +dy_3 &= 3*10^7 y_{2}^2 \\ +\end{align}$$ + +But we can instead write this with a conservation relation: + +$$\begin{align} +dy_1 &= -0.04 y_1 + 10^4 y_2 y_3 \\ +dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\ +1 &= y_{1} + y_{2} + y_{3} \\ +\end{align}$$ + +In this form, we can write this as a mass matrix ODE where $M$ is singular +(this is another form of a differential-algebraic equation (DAE)). Here, the +last row of `M` is just zero. We can implement this form as: + +```julia +using DifferentialEquations +function rober(du,u,p,t) + y₁,y₂,y₃ = u + k₁,k₂,k₃ = p + du[1] = -k₁*y₁+k₃*y₂*y₃ + du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ + du[3] = y₁ + y₂ + y₃ - 1 + nothing +end +M = [1. 0 0 + 0 1.
0 + 0 0 0] +f = ODEFunction(rober,mass_matrix=M) +prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) +sol = solve(prob_mm,Rodas5()) + +plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) +``` + +Note that if your mass matrix is singular, i.e. your system is a DAE, then you +need to make sure you choose +[a solver that is compatible with DAEs](http://docs.juliadiffeq.org/dev/solvers/dae_solve.html#Full-List-of-Methods-1) diff --git a/tutorials/exercises/01-workshop_exercises.jmd b/tutorials/exercises/01-workshop_exercises.jmd index d6be4ea3..5f4273cf 100644 --- a/tutorials/exercises/01-workshop_exercises.jmd +++ b/tutorials/exercises/01-workshop_exercises.jmd @@ -66,7 +66,7 @@ $$\begin{align} with parameter values $s=77.27$, $w=0.161$, and $q=8.375 \times 10^{-6}$, and initial conditions $x(0)=1$, $y(0)=2$, and $z(0)=3$. Use -[the tutorial on solving ODEs](http://docs.juliadiffeq.org/latest/tutorials/ode_example.html) +[the tutorial on solving ODEs](http://docs.juliadiffeq.org/dev/tutorials/ode_example.html) to solve this differential equation on the timespan of $t\in[0,360]$ with the default ODE solver. To investigate the result, plot the solution of all components over time, and plot the phase space plot of @@ -77,7 +77,7 @@ the solution (hint: use `vars=(1,2,3)`). What shape is being drawn in phase spac Because the reaction rates of `q` vs `s` is very large, this model has a "fast" system and a "slow" system. This is typical of ODEs which exhibit a property known as stiffness. Stiffness changes the ODE solvers which can handle the -equation well. [Take a look at the ODE solver page](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html) +equation well. [Take a look at the ODE solver page](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html) and investigate solving the equation using methods for non-stiff equations (ex: `Tsit5`) and stiff equations (ex: `Rodas5`). @@ -92,7 +92,7 @@ the Jacobian is costly, and thus it can be beneficial to provide the analytical solution. Use the -[ODEFunction definition page](http://docs.juliadiffeq.org/latest/features/performance_overloads.html) +[ODEFunction definition page](http://docs.juliadiffeq.org/dev/features/performance_overloads.html) to define an `ODEFunction` which holds both the OREGO ODE and its Jacobian, and solve using `Rodas5`. ## (Optional) Part 4: Automatic Symbolicification and Analytical Jacobian Calculations @@ -122,9 +122,9 @@ dz &= w(x - z)dt + \sigma_3 z dW_3\end{align}$$ with $\sigma_i = 0.1$ where the `dW` terms describe a Brownian motion, a continuous random process with normally distributed increments. Use the -[tutorial on solving SDEs](http://docs.juliadiffeq.org/latest/tutorials/sde_example.html) +[tutorial on solving SDEs](http://docs.juliadiffeq.org/dev/tutorials/sde_example.html) to solve simulate this model. Then, -[use the `EnsembleProblem`](http://docs.juliadiffeq.org/latest/features/ensemble.html) +[use the `EnsembleProblem`](http://docs.juliadiffeq.org/dev/features/ensemble.html) to generate and plot 100 trajectories of the stochastic model, and use `EnsembleSummary` to plot the mean and 5%-95% region over time. @@ -154,7 +154,7 @@ B + Z -> Y where reactions take place at a rate which is propoertional to its components, i.e. the first reaction has a rate `k*A*Y` for some `k`. 
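As a syntax hint only, mass-action reactions of this kind can be written with the DiffEqBiological DSL. The sketch below shows just the first two reactions with placeholder rate constants `k1` and `k2` (not the Oregonator values); building and simulating the full network is left to the exercise:

```julia
using DiffEqBiological
rn_sketch = @reaction_network begin
  k1, A + Y → X + P   # fires at rate k1*A*Y
  k2, X + Y → 2P      # fires at rate k2*X*Y
end k1 k2
```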
-Use the [tutorial on Gillespie SSA models](http://docs.juliadiffeq.org/latest/tutorials/discrete_stochastic_example.html) +Use the [tutorial on Gillespie SSA models](http://docs.juliadiffeq.org/dev/tutorials/discrete_stochastic_example.html) to implement the `JumpProblem` for this model, and use the `EnsembleProblem` and `EnsembleSummary` to characterize the stochastic trajectories. @@ -176,7 +176,7 @@ data = [1.0 2.05224 2.11422 2.1857 2.26827 2.3641 2.47618 2.60869 2.7677 2.96232 3.0 2.82065 2.68703 2.58974 2.52405 2.48644 2.47449 2.48686 2.52337 2.58526 2.67563 2.80053 2.9713 3.21051 3.5712 4.23706 12.0266 14868.8 24987.8 23453.4 19202.2 15721.6 12872.0 10538.8 8628.66 7064.73 5784.29 4735.96 3877.66 3174.94 2599.6] ``` -[Follow the exmaples on the parameter estimation page](http://docs.juliadiffeq.org/latest/analysis/parameter_estimation.html#Bayesian-Methods-1) +[Follow the exmaples on the parameter estimation page](http://docs.juliadiffeq.org/dev/analysis/parameter_estimation.html#Bayesian-Methods-1) to perform a Bayesian parameter estimation. What are the most likely parameters for the model given the posterior parameter distributions? @@ -190,7 +190,7 @@ parallelism section for details on how to accelerate this. DiffEqBiological.jl is a helper library for the DifferentialEquations.jl ecosystem for defining chemical reaction systems at a high leevel for easy simulation in these various forms. Use the descrption -[from the Chemical Reaction Networks documentation page](http://docs.juliadiffeq.org/latest/models/biological.html) +[from the Chemical Reaction Networks documentation page](http://docs.juliadiffeq.org/dev/models/biological.html) to build a reaction network and generate the ODE/SDE/jump equations, and compare the result to your handcoded versions. @@ -223,7 +223,7 @@ $$\begin{align} with $t \in [0,90]$, $u_0 = [100.0,0]$, and $p=[K_a,K_e]=[2.268,0.07398]$. -With this model, use [the event handling documentation page](http://docs.juliadiffeq.org/latest/features/callback_functions.html) +With this model, use [the event handling documentation page](http://docs.juliadiffeq.org/dev/features/callback_functions.html) to define a `DiscreteCallback` which fires at `t ∈ [24,48,72]` and adds a dose of 100 into `[Depot]`. (Hint: you'll want to set `tstops=[24,48,72]` to force the ODE solver to step at these times). @@ -241,7 +241,7 @@ $$\begin{align} \frac{d[Central]}{dt} &= K_a [Depot](t-\tau) - K_e [Central]\end{align}$$ where the parameter $τ = 6.0$. -[Use the DDE tutorial](http://docs.juliadiffeq.org/latest/tutorials/dde_example.html) +[Use the DDE tutorial](http://docs.juliadiffeq.org/dev/tutorials/dde_example.html) to define and solve this delayed version of the hybrid model. ## Part 3: Automatic Differentiation (AD) for Optimization (I) @@ -254,7 +254,7 @@ do this is via Automatic Differentition (AD). For small numbers of parameters we will make use of ForwardDiff.jl to use Dual number arithmetic to retrive both the solution and its derivative w.r.t. parameters in a single solve. -[Use the information from the page on local sensitvity analysis](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html) +[Use the information from the page on local sensitvity analysis](http://docs.juliadiffeq.org/dev/analysis/sensitivity.html) to define the input dual numbers, solve the equation, and plot both the solution over time and the derivative of the solution w.r.t. the parameters. 
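The following is a generic sketch of the Dual-number mechanics on a scalar linear ODE rather than the exercise's hybrid model; the parameter value, initial condition, and seeding here are placeholders:

```julia
using DifferentialEquations, ForwardDiff
f(u,p,t) = p*u
p_dual  = ForwardDiff.Dual(1.01, 1.0)   # parameter value 1.01 with one seeded partial
u0_dual = ForwardDiff.Dual(0.5, 0.0)    # initial condition carries a zero partial
prob = ODEProblem(f, u0_dual, (0.0, 1.0), p_dual)
sol  = solve(prob, Tsit5())
ForwardDiff.value(sol[end])     # u(1.0)
ForwardDiff.partials(sol[end])  # du(1.0)/dp
```

The same seeding idea extends to several parameters by giving each one its own partial component.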
@@ -268,7 +268,7 @@ data = [100.0 0.246196 0.000597933 0.24547 0.000596251 0.245275 0.000595453 0.24 0.0 53.7939 16.8784 58.7789 18.3777 59.1879 18.5003 59.2611] ``` -Use [the parameter estimation page](http://docs.juliadiffeq.org/latest/analysis/parameter_estimation.html) +Use [the parameter estimation page](http://docs.juliadiffeq.org/dev/analysis/parameter_estimation.html) to define a loss function with `build_loss_objective` and optimize the parameters against the data. What parameters were used to generate the data? @@ -282,7 +282,7 @@ concentration falls below 25. To model this effect, we will need to use `ContinuousCallbacks` to define a callback that triggers when `[Central]` falls below the threshold value. -[Use the documentation on the event handling page](http://docs.juliadiffeq.org/latest/features/callback_functions.html) to define such a callback, +[Use the documentation on the event handling page](http://docs.juliadiffeq.org/dev/features/callback_functions.html) to define such a callback, and plot the solution over time. How many times does the auto-doser administer a dose? How much does this change as you change the delay time $\tau$? @@ -290,7 +290,7 @@ a dose? How much does this change as you change the delay time $\tau$? To understand how the parameters effect the solution in a global sense, one wants to use Global Sensitivity Analysis. Use the -[GSA documentation page](http://docs.juliadiffeq.org/latest/analysis/global_sensitivity.html) +[GSA documentation page](http://docs.juliadiffeq.org/dev/analysis/global_sensitivity.html) perform global sensitivity analysis and quantify the effect of the various parameters on the solution. @@ -327,23 +327,67 @@ $$\begin{align} with $y(0) = [1,0,0]$ and $dy(0) = [-0.04,0.04,0.0]$ using the mass-matrix formulation and `Rodas5()`. Use the -[ODEProblem page](http://docs.juliadiffeq.org/latest/types/ode_types.html) +[ODEProblem page](http://docs.juliadiffeq.org/dev/types/ode_types.html) to find out how to declare a mass matrix. (Hint: what if the last row has all zeros?) ## Part 2: Solving the Implicit Robertson Equations with IDA -Use the [DAE Tutorial](http://docs.juliadiffeq.org/latest/tutorials/dae_example.html) +Use the [DAE Tutorial](http://docs.juliadiffeq.org/dev/tutorials/dae_example.html) to define a DAE in its implicit form and solve the Robertson equation with IDA. Why is `differential_vars = [true,true,false]`? ## Part 3: Manual Index Reduction of the Single Pendulum +The index of a DAE is a notion used to measure distance from +its related ODE. There are many different definitions of index, +but we're going to stick to the idea of differential index: +the number of differentiations required to convert a system +of DAEs into explicit ODE form. DAEs of high index are +usually transformed via a procedure called index reduction. +The following example will demonstrate this. + +Consider the index 3 DAE system of the cartesian pendulum. +After writing down the force equations in both directions, +we arrive at the following DAE: + +$$ +\begin{align} +m\ddot{x} &= \frac{x}{L}T \\ +m\ddot{y} &= \frac{y}{L}T - mg \\ +x^2 + y^2 &= L +\end{align} +$$ + +Notice that we don't have an equation describing the +behaviour of `T`. Let us now perform index reduction to +extract an equation for `T` + +Differentiate this third equation twice with respect to time +to reduce it from index 3 to index 1. + ## Part 4: Single Pendulum Solution with IDA +Write these equations in implicit form and solve the system using +IDA. 
## Part 5: Solving the Double Pendulum DAE System
+The following equations describe a double
+pendulum system:
+$$
+\begin{align}
+m_2\ddot{x_2} &= \frac{x_2}{L_2}T_2 \\
+m_2\ddot{y_2} &= \frac{y_2}{L_2}T_2 - m_2g \\
+{x_2}^2 + {y_2}^2 &= L_2 \\
+m_1\ddot{x_1} &= \frac{x_1}{L_1}T_1 - \frac{x_2}{L_2}T_2 \\
+m_1\ddot{y_1} &= \frac{y_1}{L_1}T_1 - m_1g - \frac{y_2}{L_2}T_2 \\
+{x_1}^2 + {y_1}^2 &= L_1 \\
+\end{align}
+$$
+
+Perform index reduction and solve it like in the previous example.
+
 # Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I)
 This problem will focus on implementing and optimizing the solution of the
@@ -411,7 +455,7 @@ your needs.
 Use the `sparsity!` function from
 [SparseDiffTools](https://github.com/JuliaDiffEq/SparseDiffTools.jl)
 to generate the sparsity pattern for the Jacobian of this problem. Follow
-the documentations [on the DiffEqFunction page](http://docs.juliadiffeq.org/latest/features/performance_overloads.html)
+the documentation [on the DiffEqFunction page](http://docs.juliadiffeq.org/dev/features/performance_overloads.html)
 to specify the sparsity pattern of the Jacobian. Generate and add the color
 vector to speed up the computation of the Jacobian.
@@ -429,7 +473,7 @@ solve with an analytical sparse Jacobian.
 ## Part 6: Utilizing Preconditioned-GMRES Linear Solvers
-Use the [linear solver specification page](http://docs.juliadiffeq.org/latest/features/linear_nonlinear.html)
+Use the [linear solver specification page](http://docs.juliadiffeq.org/dev/features/linear_nonlinear.html)
 to solve the equation with `TRBDF2` with GMRES. Use the Sundials documentation
 to solve the equation with `CVODE_BDF` with Sundials' special internal GMRES.
 To both of these, use the [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl)
@@ -437,7 +481,7 @@ to add a preconditioner to the GMRES solver.
 ## Part 7: Exploring IMEX and Exponential Integrator Techniques (E)
-Instead of using the standard `ODEProblem`, define a [`SplitODEProblem`](http://docs.juliadiffeq.org/latest/types/split_ode_types.html)
+Instead of using the standard `ODEProblem`, define a [`SplitODEProblem`](http://docs.juliadiffeq.org/dev/types/split_ode_types.html)
 to move some of the equation to the "non-stiff part". Try different splits
 and solve with `KenCarp4` to see if the solution can be accelerated.
@@ -470,7 +514,7 @@ where
 forward sensitivity analysis (forward-mode automatic differentiation) is
 no longer suitable, and for these cases one uses adjoint sensitivity analysis.
 Rewrite the PDE so the constant terms are parameters, and use the
-[adjoint sensitivity analysis](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html#Adjoint-Sensitivity-Analysis-1)
+[adjoint sensitivity analysis](http://docs.juliadiffeq.org/dev/analysis/sensitivity.html#Adjoint-Sensitivity-Analysis-1)
 documentation to solve for the solution gradient with a cost function being
 the L2 distance of the solution from the value 1. Solve with interpolated and
 checkpointed adjoints. Play with using reverse-mode automatic differentiation
@@ -505,7 +549,7 @@ Solve this system over the timespan $t\in[0,1000]$
 ## (Optional) Part 2: Alternative Dynamical Implementations of Henon-Heiles (B)
 The Henon-Heiles defines a Hamiltonian system with certain structures which
-can be utilized for a more efficient solution.
Use [the Dynamical problems page](http://docs.juliadiffeq.org/dev/types/dynamical_types.html) to define a `SecondOrderODEProblem` corresponding to the acceleration terms: $$\begin{align} @@ -524,7 +568,7 @@ Solve this problem using the `HamiltonianProblem` constructor from DiffEqPhysics To understand the orbits of the Henon-Heiles system, it can be useful to solve the system with many different initial conditions. Use the -[ensemble interface](http://docs.juliadiffeq.org/latest/features/ensemble.html) +[ensemble interface](http://docs.juliadiffeq.org/dev/features/ensemble.html) to solve with randomized initial conditions in parallel using threads with `EnsembleThreads()`. Then, use `addprocs()` to add more cores and solve using `EnsembleDistributed()`. The former will solve using all of the cores on a diff --git a/tutorials/exercises/02-workshop_solutions.jmd b/tutorials/exercises/02-workshop_solutions.jmd index c3ca1059..c1128f19 100644 --- a/tutorials/exercises/02-workshop_solutions.jmd +++ b/tutorials/exercises/02-workshop_solutions.jmd @@ -3,6 +3,13 @@ title: DifferentialEquations.jl Workshop Exercise Solutions author: Chris Rackauckas --- +```julia +using DifferentialEquations +using Sundials +using BenchmarkTools +using Plots +``` + # Problem 1: Investigating Sources of Randomness and Uncertainty in a Biological System ## Part 1: Simulating the Oregonator ODE model @@ -151,17 +158,191 @@ p = (Ka = 0.5, Ke = 0.1, τ = 4.0) # Problem 3: Differential-Algebraic Equation Modeling of a Double Pendulum (B) ## Part 1: Simple Introduction to DAEs: Mass-Matrix Robertson Equations +```julia +function f(du, u, p, t) + du[1] = -p[1]*u[1] + p[2]*u[2]*u[3] + du[2] = p[1]*u[1] - p[2]*u[2]*u[3] - p[3]*u[2]*u[2] + du[3] = u[1] + u[2] + u[3] - 1. +end +M = [1 0 0; 0 1 0; 0 0 0.] +p = [0.04, 10^4, 3e7] +u0 = [1.,0.,0.] +tspan = (0., 1e6) +prob = ODEProblem(ODEFunction(f, mass_matrix = M), u0, tspan, p) +sol = solve(prob, Rodas5()) +plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) +``` ## Part 2: Solving the Implicit Robertson Equations with IDA +```julia +# Robertson Equation DAE Implicit form +function h(out, du, u, p, t) + out[1] = -p[1]*u[1] + p[2]*u[2]*u[3] - du[1] + out[2] = p[1]*u[1] - p[2]*u[2]*u[3] - p[3]*u[2]*u[2] - du[2] + out[3] = u[1] + u[2] + u[3] - 1. +end +p = [0.04, 10^4, 3e7] +du0 = [-0.04, 0.04, 0.0] +u0 = [1.,0.,0.] +tspan = (0., 1e6) +differential_vars = [true, true, false] +prob = DAEProblem(h, du0, u0, tspan, p, differential_vars = differential_vars) +sol = solve(prob, IDA()) +plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1)) +``` ## Part 3: Manual Index Reduction of the Single Pendulum +Consider the equation: +$$ +x^2 + y^2 = L +$$ +Differentiating once with respect to time: +$$ +2x\dot{x} + 2y\dot{y} = 0 +$$ +A second time: +$$ +\begin{align} +{\dot{x}}^2 + x\ddot{x} + {\dot{y}}^2 + y\ddot{y} &= 0 \\ +u^2 + v^2 + x(\frac{x}{mL}T) + y(\frac{y}{mL}T - g) &= 0 \\ +u^2 + v^2 + \frac{x^2 + y^2}{mL}T - yg &= 0 \\ +u^2 + v^2 + \frac{T}{m} - yg &= 0 +\end{align} +$$ + +Our final set of equations is hence +$$ +\begin{align} + \ddot{x} &= \frac{x}{mL}T \\ + \ddot{y} &= \frac{y}{mL}T - g \\ + \dot{x} &= u \\ + \dot{y} &= v \\ + u^2 + v^2 -yg + \frac{T}{m} &= 0 +\end{align} +$$ + +We finally obtain $T$ into the third equation. +This required two differentiations with respect +to time, and so our system of equations went from +index 3 to index 1. Now our solver can handle the +index 1 system. 
## Part 4: Single Pendulum Solution with IDA +```julia +function f(out, da, a, p, t) + (L, m, g) = p + u, v, x, y, T = a + du, dv, dx, dy, dT = da + out[1] = x*T/(m*L) - du + out[2] = y*T/(m*L) - g - dv + out[3] = u - dx + out[4] = v - dy + out[5] = u^2 + v^2 - y*g + T/m + nothing +end + +# Release pendulum from top right +u0 = zeros(5) +u0[3] = 1.0 +du0 = zeros(5) +du0[2] = 9.81 + +p = [1,1,9.8] +tspan = (0.,100.) + +differential_vars = [true, true, true, true, false] +prob = DAEProblem(f, du0, u0, tspan, p, differential_vars = differential_vars) +sol = solve(prob, IDA()) +plot(sol, vars=(3,4)) +``` ## Part 5: Solving the Double Penulum DAE System +For the double pendulum: +The equations for the second ball are the same +as the single pendulum case. That is, the equations +for the second ball are: +$$ +\begin{align} + \ddot{x_2} &= \frac{x_2}{m_2L_2}T_2 \\ + \ddot{y_2} &= \frac{y_2}{m_2L_2}T_2 - g \\ + \dot{x_2} &= u \\ + \dot{y_2} &= v \\ + u_2^2 + v_2^2 -y_2g + \frac{T_2}{m_2} &= 0 +\end{align} +$$ +For the first ball, consider $x_1^2 + y_1^2 = L $ +$$ +\begin{align} +x_1^2 + x_2^2 &= L \\ +2x_1\dot{x_1} + 2y_1\dot{y_1} &= 0 \\ +\dot{x_1}^2 + \dot{y_1}^2 + x_1(\frac{x_1}{m_1L_1}T_1 - \frac{x_2}{m_1L_2}T_2) + y_1(\frac{y_1}{m_1L_1}T_1 - g - \frac{y_2}{m_1L_2}T_2) &= 0 \\ +u_1^2 + v_1^2 + \frac{T_1}{m_1} - \frac{x_1x_2 + y_1y_2}{m_1L_2}T_2 &= 0 +\end{align} +$$ + +So the final equations are: +$$ +\begin{align} + \dot{u_2} &= x_2*T_2/(m_2*L_2) + \dot{v_2} &= y_2*T_2/(m_2*L_2) - g + \dot{x_2} &= u_2 + \dot{y_2} &= v_2 + u_2^2 + v_2^2 -y_2*g + \frac{T_2}{m_2} &= 0 + + \dot{u_1} &= x_1*T_1/(m_1*L_1) - x_2*T_2/(m_2*L_2) + \dot{v_1} &= y_1*T_1/(m_1*L_1) - g - y_2*T_2/(m_2*L_2) + \dot{x_1} &= u_1 + \dot{y_1} &= v_1 + u_1^2 + v_1^2 + \frac{T_1}{m_1} + + \frac{-x_1*x_2 - y_1*y_2}{m_1L_2}T_2 - y_1g &= 0 +\end{align} +$$ +```julia +function f(out, da, a, p, t) + L1, m1, L2, m2, g = p + + u1, v1, x1, y1, T1, + u2, v2, x2, y2, T2 = a + + du1, dv1, dx1, dy1, dT1, + du2, dv2, dx2, dy2, dT2 = da + + out[1] = x2*T2/(m2*L2) - du2 + out[2] = y2*T2/(m2*L2) - g - dv2 + out[3] = u2 - dx2 + out[4] = v2 - dy2 + out[5] = u2^2 + v2^2 -y2*g + T2/m2 + + out[6] = x1*T1/(m1*L1) - x2*T2/(m2*L2) - du1 + out[7] = y1*T1/(m1*L1) - g - y2*T2/(m2*L2) - dv1 + out[8] = u1 - dx1 + out[9] = v1 - dy1 + out[10] = u1^2 + v1^2 + T1/m1 + + (-x1*x2 - y1*y2)/(m1*L2)*T2 - y1*g + nothing +end -# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I) +# Release pendulum from top right +u0 = zeros(10) +u0[3] = 1.0 +u0[8] = 1.0 +du0 = zeros(10) +du0[2] = 9.8 +du0[7] = 9.8 + +p = [1,1,1,1,9.8] +tspan = (0.,100.) + +differential_vars = [true, true, true, true, false, + true, true, true, true, false] +prob = DAEProblem(f, du0, u0, tspan, p, differential_vars = differential_vars) +sol = solve(prob, IDA()) +plot(sol, vars=(3,4)) +plot(sol, vars=(8,9)) +``` + +# Problem 4: Performance Optimizing and Parallelizing Semilinear PDE Solvers (I) ## Part 1: Implementing the BRUSS PDE System as ODEs ```julia diff --git a/tutorials/introduction/01-ode_introduction.jmd b/tutorials/introduction/01-ode_introduction.jmd index b3894846..4aab979e 100644 --- a/tutorials/introduction/01-ode_introduction.jmd +++ b/tutorials/introduction/01-ode_introduction.jmd @@ -5,7 +5,7 @@ author: Chris Rackauckas ## Basic Introduction Via Ordinary Differential Equations -This notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). 
The corresponding documentation page is the [ODE tutorial](http://docs.juliadiffeq.org/latest/tutorials/ode_example.html). While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned. +This notebook will get you started with DifferentialEquations.jl by introducing you to the functionality for solving ordinary differential equations (ODEs). The corresponding documentation page is the [ODE tutorial](http://docs.juliadiffeq.org/dev/tutorials/ode_example.html). While some of the syntax may be different for other types of equations, the same general principles hold in each case. Our goal is to give a gentle and thorough introduction that highlights these principles in a way that will help you generalize what you have learned. ### Background @@ -55,14 +55,14 @@ and that's it: we have succesfully solved our first ODE! #### Analyzing the Solution -Of course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the [Solution Handling](http://docs.juliadiffeq.org/latest/basics/solution.html) page. Here we will describe some of the basics. You can plot the solution using the plot recipe provided by [Plots.jl](http://docs.juliaplots.org/latest/): +Of course, the solution type is not interesting in and of itself. We want to understand the solution! The documentation page which explains in detail the functions for analyzing the solution is the [Solution Handling](http://docs.juliadiffeq.org/dev/basics/solution.html) page. Here we will describe some of the basics. You can plot the solution using the plot recipe provided by [Plots.jl](http://docs.juliaplots.org/dev/): ```julia using Plots; gr() plot(sol) ``` -From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the [Plots.jl attributes](http://docs.juliaplots.org/latest/attributes/). For example: +From the picture we see that the solution is an exponential curve, which matches our intuition. As a plot recipe, we can annotate the result using any of the [Plots.jl attributes](http://docs.juliaplots.org/dev/attributes/). For example: ```julia plot(sol,linewidth=5,title="Solution to the linear ODE with a thick line", @@ -107,7 +107,7 @@ sol(0.45) #### Controlling the Solver -DifferentialEquations.jl has a common set of solver controls among its algorithms which can be found [at the Common Solver Options](http://docs.juliadiffeq.org/latest/basics/common_solver_opts.html) page. We will detail some of the most widely used options. +DifferentialEquations.jl has a common set of solver controls among its algorithms which can be found [at the Common Solver Options](http://docs.juliadiffeq.org/dev/basics/common_solver_opts.html) page. We will detail some of the most widely used options. The most useful options are the tolerances `abstol` and `reltol`. These tell the internal adaptive time stepping engine how precise of a solution you want. Generally, `reltol` is the relative accuracy while `abstol` is the accuracy when `u` is near zero. These tolerances are local tolerances and thus are not global guarantees. However, a good rule of thumb is that the total solution accuracy is 1-2 digits less than the relative tolerances. 
Thus for the defaults `abstol=1e-6` and `reltol=1e-3`, you can expect a global accuracy of about 1-2 digits. If we want to get around 6 digits of accuracy, we can use the commands: @@ -157,7 +157,7 @@ sol = solve(prob,save_everystep=false,save_start = false) Note that similarly on the other side there is `save_end=false`. -More advanced saving behaviors, such as saving functionals of the solution, are handled via the `SavingCallback` in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html#SavingCallback-1) which will be addressed later in the tutorial. +More advanced saving behaviors, such as saving functionals of the solution, are handled via the `SavingCallback` in the [Callback Library](http://docs.juliadiffeq.org/dev/features/callback_library.html#SavingCallback-1) which will be addressed later in the tutorial. #### Choosing Solver Algorithms @@ -316,7 +316,7 @@ Not only is the DSL convenient syntax, but it does some magic behind the scenes. lv!.Jex ``` -The DSL can derive many other functions; this ability is used to speed up the solvers. An extension to DifferentialEquations.jl, [Latexify.jl](https://korsbo.github.io/Latexify.jl/latest/tutorials/parameterizedfunctions.html), allows you to extract these pieces as LaTeX expressions. +The DSL can derive many other functions; this ability is used to speed up the solvers. An extension to DifferentialEquations.jl, [Latexify.jl](https://korsbo.github.io/Latexify.jl/dev/tutorials/parameterizedfunctions.html), allows you to extract these pieces as LaTeX expressions. ## Internal Types diff --git a/tutorials/introduction/02-choosing_algs.jmd b/tutorials/introduction/02-choosing_algs.jmd index 55525b8b..c473bea0 100644 --- a/tutorials/introduction/02-choosing_algs.jmd +++ b/tutorials/introduction/02-choosing_algs.jmd @@ -3,7 +3,7 @@ title: Choosing an ODE Algorithm author: Chris Rackauckas --- -While the default algorithms, along with `alg_hints = [:stiff]`, will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the [ODE Solvers](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html) page which goes into more depth. +While the default algorithms, along with `alg_hints = [:stiff]`, will suffice in most cases, there are times when you may need to exert more control. The purpose of this part of the tutorial is to introduce you to some of the most widely used algorithm choices and when they should be used. The corresponding page of the documentation is the [ODE Solvers](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html) page which goes into more depth. ## Diagnosing Stiffness diff --git a/tutorials/introduction/03-optimizing_diffeq_code.jmd b/tutorials/introduction/03-optimizing_diffeq_code.jmd index ddd109db..6a9971d7 100644 --- a/tutorials/introduction/03-optimizing_diffeq_code.jmd +++ b/tutorials/introduction/03-optimizing_diffeq_code.jmd @@ -445,7 +445,7 @@ Lastly, we can do other things like multithread the main loops, but these optimi This gets us to about 8x faster than our original MATLAB/SciPy/R vectorized style code! -The last thing to do is then ***optimize our algorithm choice***. We have been using `Tsit5()` as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use `CVODE_BDF()`. 
However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. `CVODE_BDF` allows us to use a sparse Newton-Krylov solver by setting `linear_solver = :GMRES` (see [the solver documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Sundials.jl-1), and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time. +The last thing to do is then ***optimize our algorithm choice***. We have been using `Tsit5()` as our test algorithm, but in reality this problem is a stiff PDE discretization and thus one recommendation is to use `CVODE_BDF()`. However, instead of using the default dense Jacobian, we should make use of the sparse Jacobian afforded by the problem. The Jacobian is the matrix $\frac{df_i}{dr_j}$, where $r$ is read by the linear index (i.e. down columns). But since the $u$ variables depend on the $v$, the band size here is large, and thus this will not do well with a Banded Jacobian solver. Instead, we utilize sparse Jacobian algorithms. `CVODE_BDF` allows us to use a sparse Newton-Krylov solver by setting `linear_solver = :GMRES` (see [the solver documentation](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html#Sundials.jl-1), and thus we can solve this problem efficiently. Let's see how this scales as we increase the integration time. ```julia prob = ODEProblem(fast_gm!,r0,(0.0,10.0),p) diff --git a/tutorials/introduction/04-callbacks_and_events.jmd b/tutorials/introduction/04-callbacks_and_events.jmd index b1b98050..810d7b00 100644 --- a/tutorials/introduction/04-callbacks_and_events.jmd +++ b/tutorials/introduction/04-callbacks_and_events.jmd @@ -7,7 +7,7 @@ In working with a differential equation, our system will evolve through many sta These callbacks allow for a lot more than event handling, however. For example, we can use callbacks to achieve high-level behavior like exactly preserve conservation laws and save the trace of a matrix at pre-defined time points. This extra functionality allows us to use the callback system as a modding system for the DiffEq ecosystem's solvers. -This tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the [Event Handling and Callback Functions](http://docs.juliadiffeq.org/latest/features/callback_functions.html) page of the documentation. We will also introduce you to some of the most widely used callbacks in the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html), which is a library of pre-built mods. +This tutorial is an introduction to the callback and event handling system in DifferentialEquations.jl, documented in the [Event Handling and Callback Functions](http://docs.juliadiffeq.org/dev/features/callback_functions.html) page of the documentation. We will also introduce you to some of the most widely used callbacks in the [Callback Library](http://docs.juliadiffeq.org/dev/features/callback_library.html), which is a library of pre-built mods. 
## Events and Continuous Callbacks @@ -31,7 +31,7 @@ function condition(u,t,integrator) end ``` -Recall that the `condition` will trigger when it evaluates to zero, and here it will evaluate to zero when `u[1] == 0`, which occurs when `v == 0`. *Now we have to say what we want the callback to do.* Callbacks make use of the [Integrator Interface](http://docs.juliadiffeq.org/latest/basics/integrator.html). Instead of giving a full description, a quick and usable rundown is: +Recall that the `condition` will trigger when it evaluates to zero, and here it will evaluate to zero when `u[1] == 0`, which occurs when `v == 0`. *Now we have to say what we want the callback to do.* Callbacks make use of the [Integrator Interface](http://docs.juliadiffeq.org/dev/basics/integrator.html). Instead of giving a full description, a quick and usable rundown is: - Values are strored in `integrator.u` - Times are stored in `integrator.t` @@ -163,7 +163,7 @@ sol = solve(prob) plot(sol) ``` -Let's instead stop the integration when a condition is met. From the [Integrator Interface stepping controls](http://docs.juliadiffeq.org/latest/basics/integrator.html#Stepping-Controls-1) we see that `terminate!(integrator)` will cause the integration to end. So our new `affect!` is simply: +Let's instead stop the integration when a condition is met. From the [Integrator Interface stepping controls](http://docs.juliadiffeq.org/dev/basics/integrator.html#Stepping-Controls-1) we see that `terminate!(integrator)` will cause the integration to end. So our new `affect!` is simply: ```julia function terminate_affect!(integrator) @@ -210,7 +210,7 @@ plot(sol) ## Callback Library -As you can see, callbacks can be very useful and through `CallbackSets` we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the [Callback Library](http://docs.juliadiffeq.org/latest/features/callback_library.html). We will walk through a few examples where these callbacks can come in handy. +As you can see, callbacks can be very useful and through `CallbackSets` we can merge together various behaviors. Because of this utility, there is a library of pre-built callbacks known as the [Callback Library](http://docs.juliadiffeq.org/dev/features/callback_library.html). We will walk through a few examples where these callbacks can come in handy. ### Manifold Projection @@ -234,7 +234,7 @@ Notice that what's going on is that the numerical solution is drifting from the plot(sol.t,[u[2]^2 + u[1]^2 for u in sol.u]) # Energy ~ x^2 + v^2 ``` -Some integration techniques like [symplectic integrators](http://docs.juliadiffeq.org/latest/solvers/dynamical_solve.html#Symplectic-Integrators-1) are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is: +Some integration techniques like [symplectic integrators](http://docs.juliadiffeq.org/dev/solvers/dynamical_solve.html#Symplectic-Integrators-1) are designed to mitigate this issue, but instead let's tackle the problem by enforcing conservation of energy. 
To do so, we define our manifold as the one where energy equals 1 (since that holds in the initial condition), that is: ```julia function g(resid,u,p,t) @@ -262,7 +262,7 @@ u1,u2 = sol[500] u2^2 + u1^2 ``` -While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the [`PositiveCallback()`](http://docs.juliadiffeq.org/latest/features/callback_library.html#PositiveDomain-1) which can be used to enforce positivity of the variables. +While choosing different integration schemes and using lower tolerances can achieve this effect as well, this can be a nice way to enforce physical constraints and is thus used in many disciplines like molecular dynamics. Another such domain constraining callback is the [`PositiveCallback()`](http://docs.juliadiffeq.org/dev/features/callback_library.html#PositiveDomain-1) which can be used to enforce positivity of the variables. ### SavingCallback diff --git a/tutorials/introduction/05-formatting_plots.jmd b/tutorials/introduction/05-formatting_plots.jmd index 49fc54c0..444e9c04 100644 --- a/tutorials/introduction/05-formatting_plots.jmd +++ b/tutorials/introduction/05-formatting_plots.jmd @@ -3,7 +3,7 @@ title: Formatting Plots author: Chris Rackauckas --- -Since the plotting functionality is implemented as a recipe to Plots.jl, [all of the options open to Plots.jl can be used in our plots](https://juliaplots.github.io/supported/). In addition, there are special features specifically for [differential equation plots](http://docs.juliadiffeq.org/latest/basics/plot.html). This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling `solve` on the problem, and `plot` on the solution: +Since the plotting functionality is implemented as a recipe to Plots.jl, [all of the options open to Plots.jl can be used in our plots](https://juliaplots.github.io/supported/). In addition, there are special features specifically for [differential equation plots](http://docs.juliadiffeq.org/dev/basics/plot.html). This tutorial will teach some of the most commonly used options. Let's first get the solution to some ODE. Here I will use one of the Lorenz ordinary differential equation. As with all commands in DifferentialEquations.jl, I got a plot of the solution by calling `solve` on the problem, and `plot` on the solution: ```julia using DifferentialEquations, Plots, ParameterizedFunctions @@ -25,7 +25,7 @@ sol = solve(prob) plot(sol) ``` -Now let's change it to a phase plot. As discussed in the [plot functions page](http://docs.juliadiffeq.org/latest/basics/plot.html), we can use the `vars` command to choose the variables to plot. Let's plot variable `x` vs variable `y` vs variable `z`: +Now let's change it to a phase plot. As discussed in the [plot functions page](http://docs.juliadiffeq.org/dev/basics/plot.html), we can use the `vars` command to choose the variables to plot. 
Let's plot variable `x` vs variable `y` vs variable `z`: ```julia plot(sol,vars=(1, 2, 3)) diff --git a/tutorials/models/03-diffeqbio_I_introduction.jmd b/tutorials/models/03-diffeqbio_I_introduction.jmd index 7839db4d..e00d3b90 100644 --- a/tutorials/models/03-diffeqbio_I_introduction.jmd +++ b/tutorials/models/03-diffeqbio_I_introduction.jmd @@ -29,7 +29,7 @@ pyplot(fmt=:svg); We now construct the reaction network. The basic types of arrows and predefined rate laws one can use are discussed in detail within the DiffEqBiological [Chemical Reaction Models -documentation](http://docs.juliadiffeq.org/latest/models/biological.html). Here +documentation](http://docs.juliadiffeq.org/dev/models/biological.html). Here we use a mix of first order, zero order and repressive Hill function rate laws. Note, $\varnothing$ corresponds to the empty state, and is used for zeroth order production and first order degradation reactions: @@ -58,7 +58,8 @@ generated rate laws for each reaction latexify(repressilator; env=:chemical) ``` ```julia; echo=false; skip="notebook"; -x = latexify(repressilator; env=:chemical, starred=true, mathjax=true); +mathjax = WEAVE_ARGS[:doctype] == "pdf" ? false : true +x = latexify(repressilator; env=:chemical, starred=true, mathjax=mathjax); display("text/latex", "$x"); ``` @@ -66,10 +67,10 @@ We can also use Latexify to look at the corresponding ODE model for the chemical system ```julia; results="hidden"; -latexify(repressilator) +latexify(repressilator, cdot=false) ``` ```julia; echo=false; skip="notebook"; -x = latexify(repressilator, starred=true); +x = latexify(repressilator, cdot=false, starred=true); display("text/latex", "$x"); ``` @@ -118,7 +119,7 @@ plot(sol, fmt=:svg) We see the well-known oscillatory behavior of the repressilator! For more on choices of ODE solvers, see the JuliaDiffEq -[documentation](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html). +[documentation](http://docs.juliadiffeq.org/dev/solvers/ode_solve.html). --- @@ -147,7 +148,7 @@ Here we see that oscillations remain, but become much noiser. Note, in constructing the `JumpProblem` we could have used any of the SSAs that are part of DiffEqJump instead of the `Direct` method, see the list of SSAs (i.e. constant rate jump aggregators) in the -[documentation](http://docs.juliadiffeq.org/latest/types/jump_types.html#Constant-Rate-Jump-Aggregators-1). +[documentation](http://docs.juliadiffeq.org/dev/types/jump_types.html#Constant-Rate-Jump-Aggregators-1). --- ## $\tau$-leaping Methods: @@ -189,12 +190,17 @@ tspan = (0.,4.); The corresponding Chemical Langevin Equation SDE is then -$$ -dX_t = \left(c_1 X - c_2 X + c_3 \right) dt + \left( \sqrt{c_1 X} - \sqrt{c_2 X} + \sqrt{c_3} \right)dW_t, -$$ +```julia; results="hidden"; +latexify(bdp, noise=true, cdot=false) +``` +```julia; echo=false; skip="notebook"; +x = latexify(bdp, noise=true, cdot=false, starred=true); +display("text/latex", "$x"); +``` -where $W_t$ denotes a standard Brownian Motion. We can solve the CLE SDE model -by creating an SDEProblem and solving it similar to what we did for ODEs above: +where each $W_i(t)$ denotes an independent Brownian Motion. We can solve the CLE +SDE model by creating an `SDEProblem` and solving it similar to what we did for +ODEs above: ```julia # SDEProblem for CLE @@ -208,7 +214,7 @@ plot(sol, fmt=:svg) We again have complete freedom to select any of the StochasticDifferentialEquations.jl SDE solvers, see the -[documentation](http://docs.juliadiffeq.org/latest/solvers/sde_solve.html). 
+[documentation](http://docs.juliadiffeq.org/dev/solvers/sde_solve.html). --- ## What information can be queried from the reaction_network: @@ -220,24 +226,24 @@ The generated `reaction_network` contains a lot of basic information. For exampl returns the Jacobian of the ODEs in `J`. A corresponding Jacobian matrix of expressions can be accessed using the `jacobianexprs` function: ```julia; results="hidden"; -latexify(jacobianexprs(repressilator)) +latexify(jacobianexprs(repressilator), cdot=false) ``` ```julia; echo=false; skip="notebook"; -x = latexify(jacobianexprs(repressilator), starred=true); +x = latexify(jacobianexprs(repressilator), cdot=false, starred=true); display("text/latex", "$x"); ``` - `pjac = paramjacfun(repressilator)` is a function `pjac(pJ,u,p,t)` that evaluates and returns the Jacobian, `pJ`, of the ODEs *with respect to the parameters*. This allows `reaction_network`s to be used in the DifferentialEquations.jl local sensitivity analysis package - [DiffEqSensitivity](http://docs.juliadiffeq.org/latest/analysis/sensitivity.html). + [DiffEqSensitivity](http://docs.juliadiffeq.org/dev/analysis/sensitivity.html). By default, generated `ODEProblems` will be passed the corresponding Jacobian function, which will then be used within implicit ODE/SDE methods. The [DiffEqBiological API -documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides +documentation](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html) provides a thorough description of the many query functions that are provided to access network properties and generated functions. In DiffEqBiological Tutorial II we'll explore the API. diff --git a/tutorials/models/04-diffeqbio_II_networkproperties.jmd b/tutorials/models/04-diffeqbio_II_networkproperties.jmd index 8aa6f918..a686fe96 100644 --- a/tutorials/models/04-diffeqbio_II_networkproperties.jmd +++ b/tutorials/models/04-diffeqbio_II_networkproperties.jmd @@ -4,7 +4,7 @@ author: Samuel Isaacson --- The [DiffEqBiological -API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) provides a +API](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html) provides a collection of functions for easily accessing network properties, and for incrementally building and extending a network. In this tutorial we'll go through the API, and then illustrate how to programmatically construct a @@ -45,7 +45,7 @@ display("text/latex", "$x"); --- ## Network Properties [Basic -properties](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Basic-properties-1) +properties](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Basic-properties-1) of the generated network include the `speciesmap` and `paramsmap` functions we examined in the last tutorial, along with the corresponding `species` and `params` functions: @@ -61,7 +61,7 @@ The numbers of species, parameters and reactions can be accessed using `numspecies(rn)`, `numparams(rn)` and `numreactions(rn)`. A number of functions are available to access [properties of -reactions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Properties-1) +reactions](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Reaction-Properties-1) within the generated network, including `substrates`, `products`, `dependents`, `ismassaction`, `substratestoich`, `substratesymstoich`, `productstoich`, `productsymstoich`, and `netstoich`. Each of these functions takes two @@ -93,7 +93,7 @@ and `productstoich` are defined similarly. 
Several functions are also provided that calculate different types of [dependency -graphs](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Dependency-Graphs-1). +graphs](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Dependency-Graphs-1). These include `rxtospecies_depgraph`, which provides a mapping from reaction index to the indices of species whose population changes when the reaction occurs: @@ -136,7 +136,7 @@ returning information that is already stored within the generated `reaction_network`. For these functions, modifying the returned data structures may lead to inconsistent internal state within the network. As such, they should be used for accessing, but not modifying, network properties. The [API -documentation](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html) +documentation](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html) indicates which functions return newly allocated data structures and which return data stored within the `reaction_network`. @@ -158,7 +158,7 @@ extended through a programmatic interface: `@min_reaction_network` and `@empty_reaction_network`. We now give an introduction to constructing these more minimal network representations, and how they can be programmatically extended. See also the relevant [API -section](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1). +section](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Reaction-Network-Generation-Macros-1). The `@min_reaction_network` macro works identically to the `@reaction_network` macro, but the generated network will only be complete with respect to its @@ -187,7 +187,7 @@ the missing reactions. Note, it is required that species and parameters be defined before any reactions using them are added. The necessary network extension functions are given by `addspecies!`, `addparam!` and `addreaction!`, and described in the -[API](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). To complete `rnmin` we first add the relevant +[API](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Functions-to-Add-Species,-Parameters-and-Reactions-to-a-Network-1). To complete `rnmin` we first add the relevant species: ```julia @@ -271,7 +271,7 @@ evaluating Jacobians. For large networks this can give a significant speed-up in the time required for constructing an ODE model. Each function and its associated keyword arguments are described in the API section, [Functions to add ODEs, SDEs or Jumps to a -Network](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1). +Network](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Functions-to-Add-ODEs,-SDEs-or-Jumps-to-a-Network-1). Let's extend `rnmin` to include the needed functions for use in ODE solvers: @@ -281,13 +281,13 @@ addodes!(rnmin) ``` The [Generated Functions for -Models](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Functions-for-Models-1) +Models](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Generated-Functions-for-Models-1) section of the API shows what functions have been generated. For ODEs these include `oderhsfun(rnmin)`, which returns a function of the form `f(du,u,p,t)` which evaluates the ODEs (i.e. the time derivatives of `u`) within `du`. 
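As a quick illustration of calling this generated function directly (a sketch assuming the `rnmin` network from above with `addodes!` already applied; the state and parameter values are placeholders):

```julia
f_rhs = oderhsfun(rnmin)
u  = ones(numspecies(rnmin))   # placeholder state, one entry per species
p  = ones(numparams(rnmin))    # placeholder parameter values
du = similar(u)
f_rhs(du, u, p, 0.0)           # writes the time derivatives at state u into du
du
```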
For each generated function, the corresponding expressions from which it was generated can be retrieved using accessors from the [Generated -Expressions](http://docs.juliadiffeq.org/latest/apis/diffeqbio.html#Generated-Expressions-1) +Expressions](http://docs.juliadiffeq.org/dev/apis/diffeqbio.html#Generated-Expressions-1) section of the API. The equations within `du` can be retrieved using the `odeexprs(rnmin)` function. For example: @@ -324,7 +324,7 @@ derivative functions with respect to the parameters. `paramjacfun(rnmin)` then returns the generated function. It has the form `fpjac(dPJ,u,p,t)`, which given the current solution `u` evaluates the Jacobian matrix with respect to parameters `p` within `dPJ`. For use in DifferentialEquations.jl solvers, an -[`ODEFunction`](http://docs.juliadiffeq.org/latest/features/performance_overloads.html) +[`ODEFunction`](http://docs.juliadiffeq.org/dev/features/performance_overloads.html) representation of the ODEs is available from `odefun(rnmin)`. `addsdes!` and `addjumps!` work similarly to complete the network for use in @@ -412,10 +412,10 @@ oprob = ODEProblem(rn, u₀, tspan, p) We are now ready to solve the problem and plot the solution. Since we have essentially generated a method of lines discretization of the diffusion equation with a discontinuous initial condition, we'll use an A-L stable implicit ODE -solver, `KenCarp4`, and plot the solution at a few times: +solver, `Rodas5`, and plot the solution at a few times: ```julia -sol = solve(oprob, KenCarp4()) +sol = solve(oprob, Rodas5()) times = [0., .0001, .001, .01] plt = plot() for time in times diff --git a/tutorials/models/04b-diffeqbio_III_steadystates.jmd b/tutorials/models/04b-diffeqbio_III_steadystates.jmd new file mode 100644 index 00000000..079644db --- /dev/null +++ b/tutorials/models/04b-diffeqbio_III_steadystates.jmd @@ -0,0 +1,191 @@ +--- +title: "DiffEqBiological Tutorial III: Steady-States and Bifurcations" +author: Torkel Loman and Samuel Isaacson +--- + +Several types of steady state analysis can be performed for networks defined +with DiffEqBiological by utilizing homotopy continuation. This allows for +finding the steady states and bifurcations within a large class of systems. In +this tutorial we'll go through several examples of using this functionality. + +We start by loading the necessary packages: +```julia +using DiffEqBiological, Plots +gr(); default(fmt = :png); +``` + +### Steady states and stability of a biochemical reaction network. +Bistable switches are well known biological motifs, characterised by the +presence of two different stable steady states. + +```julia +bistable_switch = @reaction_network begin + d, (X,Y) → ∅ + hillR(Y,v1,K1,n1), ∅ → X + hillR(X,v2,K2,n2), ∅ → Y +end d v1 K1 n1 v2 K2 n2 +d = 0.01; +v1 = 1.5; K1 = 30; n1 = 3; +v2 = 1.; K2 = 30; n2 = 3; +bistable_switch_p = [d, v1 ,K1, n1, v2, K2, n2]; +``` + +The steady states can be found using the `steady_states` function (which takes a reaction network and a set of parameter values as input). The stability of these steady states can be found using the `stability` function. + +```julia +ss = steady_states(bistable_switch, bistable_switch_p) +``` + +```julia +stability(ss,bistable_switch, bistable_switch_p) +``` + +Since the equilibration methodology is based on homotopy continuation, it is not +able to handle systems with non-integer exponents, or non polynomial reaction +rates. Neither of the following two systems will work. 
+ +This system contains a non-integer exponent: +```julia +rn1 = @reaction_network begin + p, ∅ → X + hill(X,v,K,n), X → ∅ +end p v K n +p1 = [1.,2.5,1.5,1.5] +steady_states(rn1,p1) +``` + +This system contains a logarithmic reaction rate: +```julia +rn2 = @reaction_network begin + p, ∅ → X + log(X), X → ∅ +end p +p2 = [1.] +steady_states(rn2,p2) +``` + +### Bifurcation diagrams for biochemical reaction networks +Bifurcation diagrams illustrate how the steady states of a system depend on one +or more parameters. They can be computed with the `bifurcations` function. It +takes the same arguments as `steady_states`, with the addition of the parameter +one wants to vary, and an interval over which to vary it: + +```julia +bif = bifurcations(bistable_switch, bistable_switch_p, :v1, (.1,5.)) +plot(bif,ylabel="[X]",label="") +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) +``` + +The values for the second variable in the system can also be displayed, by +giving that as an additional input to `plot` (it is the second argument, directly +after the bifurcation diagram object): + +```julia +plot(bif,2,ylabel="[Y]") +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) +``` + +The `plot` function also accepts all other arguments which the Plots.jl `plot` function accepts. + +```julia +bif = bifurcations(bistable_switch, bistable_switch_p,:v1,(.1,10.)) +plot(bif,linewidth=1.,title="A bifurcation diagram",ylabel="Steady State concentration") +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) +``` + +Certain parameters, like `n1`, cannot be sensibly varied over a continuous +interval. Instead, a discrete bifurcation diagram can be calculated with the +`bifurcation_grid` function. Instead of an interval, the last argument is a +range of numbers: + +```julia +bif = bifurcation_grid(bistable_switch, bistable_switch_p,:n1,1.:5.) +plot(bif) +scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) +``` + +### Bifurcation diagrams over two dimensions +In addition to the bifurcation diagrams illustrated above, where only a single +variable is varied, it is also possible to investigate the steady state +properties of s system as two different parameters are varied. Due to the nature +of the underlying bifurcation algorithm it is not possible to continuously vary +both parameters. Instead, a set of discrete values are selected for the first +parameter, and a continuous interval for the second. Next, for each discrete +value of the first parameter, a normal bifurcation diagram is created over the +interval given for the second parameter. + +```julia +bif = bifurcation_grid_diagram(bistable_switch, bistable_switch_p,:n1,0.:4.,:v1,(.1,5.)) +plot(bif) +plot!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) +``` + +In the single variable case we could use a `bifurcation_grid` to investigate the +behavior of a parameter which could only attain discrete values. In the same +way, if we are interested in two parameters, both of which require integer +values, we can use `bifrucation_grid_2d`. In our case, this is required if we +want to vary both the parameters `n1` and `n2`: + +```julia +bif = bifurcation_grid_2d(bistable_switch, bistable_switch_p,:n1,1.:3.,:n2,1.:10.) +plot(bif) +scatter!([[],[]],color=[:blue :red],label = ["Stable" "Unstable"]) +``` + +### The Brusselator +The Brusselator is a well know reaction network, which may or may not oscillate, +depending on parameter values. 
+ +```julia +brusselator = @reaction_network begin + A, ∅ → X + 1, 2X + Y → 3X + B, X → Y + 1, X → ∅ +end A B; +A = 0.5; B = 4.; +brusselator_p = [A, B]; +``` + +The system has only one steady state, for $(X,Y)=(A,B/A)$ This fixed point +becomes unstable when $B > 1+A^2$, leading to oscillations. Bifurcation diagrams +can be used to determine the system's stability, and hence look for where oscillations might appear in the Brusselator: + +```julia +bif = bifurcations(brusselator,brusselator_p,:B,(0.1,2.5)) +plot(bif,2) +plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) +``` + +Here red and yellow colors label unstable steady-states, while blue and cyan +label stable steady-states. (In addition, yellow and cyan correspond to points +where at least one eigenvalue of the Jacobian is imaginary, while red and blue +correspond to points with real-valued eigenvalues.) + +Given `A=0.5`, the point at which the system should become unstable is `B=1.25`. We can confirm this in the bifurcation diagram. + +We can also investigate the behavior when we vary both parameters of the system: + +```julia +bif = bifurcation_grid_diagram(brusselator,brusselator_p,:B,0.5:0.02:5.0,:A,(0.2,5.0)) +plot(bif) +plot!([[],[],[],[]],color=[:blue :cyan :orange :red],label = ["Stable Real" "Stable Complex" "Unstable Complex" "Unstable Real"]) +``` + +--- +## Getting Help +Have a question related to DiffEqBiological or this tutorial? Feel free to ask +in the DifferentialEquations.jl [Gitter](https://gitter.im/JuliaDiffEq/Lobby). +If you think you've found a bug in DiffEqBiological, or would like to +request/discuss new functionality, feel free to open an issue on +[Github](https://github.com/JuliaDiffEq/DiffEqBiological.jl) (but please check +there is no related issue already open). If you've found a bug in this tutorial, +or have a suggestion, feel free to open an issue on the [DiffEqTutorials Github +site](https://github.com/JuliaDiffEq/DiffEqTutorials.jl). Or, submit a pull +request to DiffEqTutorials updating the tutorial! + +--- +```julia; echo=false; skip="notebook" +using DiffEqTutorials +DiffEqTutorials.tutorial_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file], remove_homedir=true) +```