diff --git a/src/OrdinaryDiffEq.jl b/src/OrdinaryDiffEq.jl
index 4cefb0aec1..32427d5481 100644
--- a/src/OrdinaryDiffEq.jl
+++ b/src/OrdinaryDiffEq.jl
@@ -399,7 +399,7 @@ export SymplecticEuler, VelocityVerlet, VerletLeapfrog, PseudoVerletLeapfrog,
 export SplitEuler
-export Nystrom4, FineRKN4, FineRKN5, Nystrom4VelocityIndependent,
+export Nystrom4, FineRKN4, FineRKN5, SharpFineRKN6, Nystrom4VelocityIndependent,
        Nystrom5VelocityIndependent, IRKN3, IRKN4, DPRKN4, DPRKN5, DPRKN6, DPRKN6FM,
        DPRKN8, DPRKN12, ERKN4, ERKN5, ERKN7
diff --git a/src/alg_utils.jl b/src/alg_utils.jl
index 17d0fe95a6..1cf3b56f36 100644
--- a/src/alg_utils.jl
+++ b/src/alg_utils.jl
@@ -508,6 +508,7 @@ alg_order(alg::IRKN3) = 3
 alg_order(alg::Nystrom4) = 4
 alg_order(alg::FineRKN4) = 4
 alg_order(alg::FineRKN5) = 5
+alg_order(alg::SharpFineRKN6) = 6
 alg_order(alg::Nystrom4VelocityIndependent) = 4
 alg_order(alg::IRKN4) = 4
 alg_order(alg::Nystrom5VelocityIndependent) = 5
diff --git a/src/algorithms.jl b/src/algorithms.jl
index a8959e0606..27d6ea2128 100644
--- a/src/algorithms.jl
+++ b/src/algorithms.jl
@@ -969,6 +969,31 @@ In particular, this method allows the acceleration equation to depend on the vel
 """
 struct FineRKN5 <: OrdinaryDiffEqAdaptivePartitionedAlgorithm end
+"""
+    SharpFineRKN6()
+
+An adaptive 6th order explicit Runge-Kutta-Nyström method which can be applied directly to second order ODEs.
+In particular, this method allows the acceleration equation to depend on the velocity.
+
+## References
+
+```
+@article{SHARP1992279,
+title = {Some Nyström pairs for the general second-order initial-value problem},
+journal = {Journal of Computational and Applied Mathematics},
+volume = {42},
+number = {3},
+pages = {279-291},
+year = {1992},
+issn = {0377-0427},
+doi = {https://doi.org/10.1016/0377-0427(92)90081-8},
+url = {https://www.sciencedirect.com/science/article/pii/0377042792900818},
+author = {P.W. Sharp and J.M.
Fine} +} +``` +""" +struct SharpFineRKN6 <: OrdinaryDiffEqAdaptivePartitionedAlgorithm end + """ Nystrom4VelocityIdependent diff --git a/src/caches/rkn_caches.jl b/src/caches/rkn_caches.jl index 7ffa67bf23..5043fe930f 100644 --- a/src/caches/rkn_caches.jl +++ b/src/caches/rkn_caches.jl @@ -124,6 +124,55 @@ function alg_cache(alg::FineRKN5, u, rate_prototype, ::Type{uEltypeNoUnits}, FineRKN5ConstantCache(constvalue(uBottomEltypeNoUnits), constvalue(tTypeNoUnits)) end +@cache struct SharpFineRKN6Cache{uType, rateType, reducedRateType, uNoUnitsType, TabType} <: + OrdinaryDiffEqMutableCache + u::uType + uprev::uType + fsalfirst::rateType + k2::reducedRateType + k3::reducedRateType + k4::reducedRateType + k5::reducedRateType + k6::reducedRateType + k7::reducedRateType + k8::reducedRateType + k::rateType + utilde::uType + tmp::uType + atmp::uNoUnitsType + tab::TabType +end + +function alg_cache(alg::SharpFineRKN6, u, rate_prototype, ::Type{uEltypeNoUnits}, + ::Type{uBottomEltypeNoUnits}, ::Type{tTypeNoUnits}, uprev, uprev2, f, t, + dt, reltol, p, calck, + ::Val{true}) where {uEltypeNoUnits, uBottomEltypeNoUnits, tTypeNoUnits} + reduced_rate_prototype = rate_prototype.x[2] + tab = SharpFineRKN6ConstantCache(constvalue(uBottomEltypeNoUnits), + constvalue(tTypeNoUnits)) + k1 = zero(rate_prototype) + k2 = zero(reduced_rate_prototype) + k3 = zero(reduced_rate_prototype) + k4 = zero(reduced_rate_prototype) + k5 = zero(reduced_rate_prototype) + k6 = zero(reduced_rate_prototype) + k7 = zero(reduced_rate_prototype) + k8 = zero(reduced_rate_prototype) + k = zero(rate_prototype) + utilde = zero(u) + atmp = similar(u, uEltypeNoUnits) + recursivefill!(atmp, false) + tmp = zero(u) + SharpFineRKN6Cache(u, uprev, k1, k2, k3, k4, k5, k6, k7, k8, k, utilde, tmp, atmp, tab) +end + +function alg_cache(alg::SharpFineRKN6, u, rate_prototype, ::Type{uEltypeNoUnits}, + ::Type{uBottomEltypeNoUnits}, ::Type{tTypeNoUnits}, uprev, uprev2, f, t, + dt, reltol, p, calck, + ::Val{false}) where {uEltypeNoUnits, uBottomEltypeNoUnits, tTypeNoUnits} + SharpFineRKN6ConstantCache(constvalue(uBottomEltypeNoUnits), constvalue(tTypeNoUnits)) +end + @cache struct Nystrom4VelocityIndependentCache{uType, rateType, reducedRateType} <: OrdinaryDiffEqMutableCache u::uType diff --git a/src/perform_step/rkn_perform_step.jl b/src/perform_step/rkn_perform_step.jl index 4d93f3c9c6..20b5769f4d 100644 --- a/src/perform_step/rkn_perform_step.jl +++ b/src/perform_step/rkn_perform_step.jl @@ -5,7 +5,7 @@ ## y'₁ = y'₀ + h∑bᵢk'ᵢ const NystromCCDefaultInitialization = Union{Nystrom4ConstantCache, FineRKN4ConstantCache, - FineRKN5ConstantCache, + FineRKN5ConstantCache, SharpFineRKN6ConstantCache, Nystrom4VelocityIndependentConstantCache, Nystrom5VelocityIndependentConstantCache, IRKN3ConstantCache, IRKN4ConstantCache, @@ -27,6 +27,7 @@ function initialize!(integrator, cache::NystromCCDefaultInitialization) end const NystromDefaultInitialization = Union{Nystrom4Cache, FineRKN4Cache, FineRKN5Cache, + SharpFineRKN6Cache, Nystrom4VelocityIndependentCache, Nystrom5VelocityIndependentCache, IRKN3Cache, IRKN4Cache, @@ -365,6 +366,182 @@ end end end +@muladd function perform_step!(integrator, cache::SharpFineRKN6ConstantCache, + repeat_step = false) + @unpack t, dt, f, p = integrator + duprev, uprev = integrator.uprev.x + @unpack c2, c3, c4, c5, c6, c7, c8, a21, a31, a32, a41, a42, a43, a51, + a52, a53, a54, a61, a62, a63, a64, a65, a71, a72, a73, a74, a75, a76, a81, a82, a83, a84, a85, a86, a87, + abar21, abar31, abar32, abar41, abar42, abar43, abar51, + abar52, 
abar53, abar54, abar61, abar62, abar63, abar64, abar65, + abar71, abar72, abar73, abar74, abar75, abar76, abar81, abar82, abar83, abar84, abar85, abar86, abar87, b1, b2, b3, b4, + b5, b6, b7, b8, bp1, bp2, bp3, bp4, bp5, bp6, bp7, bp8, btilde1, btilde2, btilde3, btilde4, btilde5, btilde6, btilde7, btilde8, bptilde1, bptilde2, bptilde3, bptilde4, + bptilde5, bptilde6, bptilde7, bptilde8 = cache + k1 = integrator.fsalfirst.x[1] + + ku = uprev + dt * (c2 * duprev + dt * (a21 * k1)) + kdu = duprev + dt * (abar21 * k1) + + k2 = f.f1(kdu, ku, p, t + dt * c2) + ku = uprev + dt * (c3 * duprev + dt * (a31 * k1 + a32 * k2)) + kdu = duprev + dt * (abar31 * k1 + abar32 * k2) + + k3 = f.f1(kdu, ku, p, t + dt * c3) + ku = uprev + dt * (c4 * duprev + dt * (a41 * k1 + a42 * k2 + a43 * k3)) + kdu = duprev + dt * (abar41 * k1 + abar42 * k2 + abar43 * k3) + + k4 = f.f1(kdu, ku, p, t + dt * c4) + ku = uprev + dt * (c5 * duprev + dt * (a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4)) + kdu = duprev + dt * (abar51 * k1 + abar52 * k2 + abar53 * k3 + abar54 * k4) + + k5 = f.f1(kdu, ku, p, t + dt * c5) + ku = uprev + + dt * (c6 * duprev + dt * (a61 * k1 + a62 * k2 + a63 * k3 + a64 * k4 + a65 * k5)) + kdu = duprev + + dt * (abar61 * k1 + abar62 * k2 + abar63 * k3 + abar64 * k4 + abar65 * k5) + + k6 = f.f1(kdu, ku, p, t + dt * c6) + ku = uprev + + dt * (c7 * duprev + + dt * (a71 * k1 + a72 * k2 + a73 * k3 + a74 * k4 + a75 * k5 + a76 * k6)) + kdu = duprev + + dt * (abar71 * k1 + abar72 * k2 + abar73 * k3 + abar74 * k4 + abar75 * k5 + + abar76 * k6) + + k7 = f.f1(kdu, ku, p, t + dt * c7) + ku = uprev + + dt * (c8 * duprev + + dt * (a81 * k1 + a82 * k2 + a83 * k3 + a84 * k4 + a85 * k5 + a86 * k6 + a87 * k7)) + kdu = duprev + + dt * (abar81 * k1 + abar82 * k2 + abar83 * k3 + abar84 * k4 + abar85 * k5 + + abar86 * k6 + abar87 * k7) + + k8 = f.f1(kdu, ku, p, t + dt * c8) + u = uprev + + dt * (duprev + + dt * + (b1 * k1 + b2 * k2 + b3 * k3 + b4 * k4 + b5 * k5 + b6 * k6 + b7 * k7 + b8 * k8)) + du = duprev + + dt * (bp1 * k1 + bp2 * k2 + bp3 * k3 + bp4 * k4 + bp5 * k5 + bp6 * k6 + bp7 * k7 + + bp8 * k8) + + integrator.u = ArrayPartition((du, u)) + integrator.fsallast = ArrayPartition((f.f1(du, u, p, t + dt), f.f2(du, u, p, t + dt))) + integrator.stats.nf += 8 + integrator.stats.nf2 += 1 + integrator.k[1] = integrator.fsalfirst + integrator.k[2] = integrator.fsallast + + #if integrator.opts.adaptive + # dtsq = dt^2 + # uhat = dtsq * + # (btilde1 * k1 + btilde3 * k3 + btilde4 * k4 + btilde5 * k5 + btilde6 * k6 + + # btilde7 * k7 + btilde8 * k8) + # duhat = dt * (bptilde1 * k1 + bptilde3 * k3 + bptilde4 * k4 + bptilde5 * k5 + + # bptilde6 * k6 + bptilde7 * k7 + bptilde8 * k8) + # utilde = ArrayPartition((duhat, uhat)) + # atmp = calculate_residuals(utilde, integrator.uprev, integrator.u, + # integrator.opts.abstol, integrator.opts.reltol, + # integrator.opts.internalnorm, t) + # integrator.EEst = integrator.opts.internalnorm(atmp, t) + #end +end + +@muladd function perform_step!(integrator, cache::SharpFineRKN6Cache, repeat_step = false) + @unpack t, dt, f, p = integrator + du, u = integrator.u.x + duprev, uprev = integrator.uprev.x + @unpack tmp, atmp, fsalfirst, k2, k3, k4, k5, k6, k7, k8, k, utilde = cache + @unpack c1, c2, c3, c4, c5, c6, c7, c8, a21, a31, a32, a41, a42, a43, a51, + a52, a53, a54, a61, a62, a63, a64, a65, a71, a72, a73, a74, a75, a76, a81, a82, a83, a84, a85, a86, a87, + abar21, abar31, abar32, abar41, abar42, abar43, abar51, + abar52, abar53, abar54, abar61, abar62, abar63, abar64, abar65, + abar71, abar72, 
abar73, abar74, abar75, abar76, abar81, abar82, abar83, abar84, abar85, abar86, abar87, b1, b2, b3, b4, + b5, b6, b7, b8, bp1, bp2, bp3, bp4, bp5, bp6, bp7, bp8, btilde1, btilde2, btilde3, btilde4, btilde5, btilde6, btilde7, btilde8, bptilde1, bptilde2, bptilde3, bptilde4, + bptilde5, bptilde6, bptilde7, bptilde8 = cache.tab + kdu, ku = integrator.cache.tmp.x[1], integrator.cache.tmp.x[2] + uidx = eachindex(integrator.uprev.x[2]) + k1 = integrator.fsalfirst.x[1] + + @.. broadcast=false ku=uprev + dt * (c2 * duprev + dt * (a21 * k1)) + @.. broadcast=false kdu=duprev + dt * (abar21 * k1) + + f.f1(k2, kdu, ku, p, t + dt * c2) + @.. broadcast=false ku=uprev + dt * (c3 * duprev + dt * (a31 * k1 + a32 * k2)) + @.. broadcast=false kdu=duprev + dt * (abar31 * k1 + abar32 * k2) + + f.f1(k3, kdu, ku, p, t + dt * c3) + @.. broadcast=false ku=uprev + + dt * (c4 * duprev + dt * (a41 * k1 + a42 * k2 + a43 * k3)) + @.. broadcast=false kdu=duprev + dt * (abar41 * k1 + abar42 * k2 + abar43 * k3) + + f.f1(k4, kdu, ku, p, t + dt * c4) + @.. broadcast=false ku=uprev + + dt * + (c5 * duprev + dt * (a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4)) + @.. broadcast=false kdu=duprev + + dt * (abar51 * k1 + abar52 * k2 + abar53 * k3 + abar54 * k4) + + f.f1(k5, kdu, ku, p, t + dt * c5) + @.. broadcast=false ku=uprev + + dt * (c6 * duprev + + dt * (a61 * k1 + a62 * k2 + a63 * k3 + a64 * k4 + a65 * k5)) + @.. broadcast=false kdu=duprev + + dt * (abar61 * k1 + abar62 * k2 + abar63 * k3 + abar64 * k4 + + abar65 * k5) + + f.f1(k6, kdu, ku, p, t + dt * c6) + @.. broadcast=false ku=uprev + + dt * (c7 * duprev + + dt * (a71 * k1 + a72 * k2 + a73 * k3 + a74 * k4 + a75 * k5 + + a76 * k6)) + @.. broadcast=false kdu=duprev + + dt * (abar71 * k1 + abar72 * k2 + abar73 * k3 + abar74 * k4 + + abar75 * k5 + abar76 * k6) + + f.f1(k7, kdu, ku, p, t + dt * c7) + @.. broadcast=false ku=uprev + + dt * (c8 * duprev + + dt * (a81 * k1 + a82 * k2 + a83 * k3 + a84 * k4 + a85 * k5 + + a86 * k6 + a87 * k7)) + @.. broadcast=false kdu=duprev + + dt * (abar81 * k1 + abar82 * k2 + abar83 * k3 + abar84 * k4 + + abar85 * k5 + + abar86 * k6 + abar87 * k7) + + f.f1(k8, kdu, ku, p, t + dt * c8) + @.. broadcast=false u=uprev + + dt * (duprev + + dt * + (b1 * k1 + b2 * k2 + b3 * k3 + b4 * k4 + b5 * k5 + b6 * k6 + + b7 * k7 + b8 * k8)) + @.. broadcast=false du=duprev + + dt * + (bp1 * k1 + bp2 * k2 + bp3 * k3 + bp4 * k4 + bp5 * k5 + + bp6 * k6 + bp7 * k7 + bp8 * k8) + + f.f1(k.x[1], du, u, p, t + dt) + f.f2(k.x[2], du, u, p, t + dt) + integrator.stats.nf += 8 + integrator.stats.nf2 += 1 + #if integrator.opts.adaptive + # duhat, uhat = utilde.x + # dtsq = dt^2 + # @.. broadcast=false uhat=dtsq * + # (btilde1 * k1 + btilde3 * k3 + btilde4 * k4 + + # btilde5 * k5 + btilde6 * k6 + btilde7 * k7 + btilde8 * k8) + # @.. 
broadcast=false duhat=dt * + # (bptilde1 * k1 + bptilde3 * k3 + bptilde4 * k4 + + # bptilde5 * k5 + bptilde6 * k6 + bptilde7 * k7 + + # bptilde8 * k8) + + # calculate_residuals!(atmp, utilde, integrator.uprev, integrator.u, + # integrator.opts.abstol, integrator.opts.reltol, + # integrator.opts.internalnorm, t) + # integrator.EEst = integrator.opts.internalnorm(atmp, t) + #end +end + @muladd function perform_step!(integrator, cache::Nystrom4VelocityIndependentConstantCache, repeat_step = false) @unpack t, dt, f, p = integrator diff --git a/src/tableaus/rkn_tableaus.jl b/src/tableaus/rkn_tableaus.jl index d4e89d2a15..6b3d1140a0 100644 --- a/src/tableaus/rkn_tableaus.jl +++ b/src/tableaus/rkn_tableaus.jl @@ -266,6 +266,244 @@ function FineRKN5ConstantCache(T::Type, T2::Type) bptilde3, bptilde4, bptilde5, bptilde6, bptilde7) end +struct SharpFineRKN6ConstantCache{T, T2} <: OrdinaryDiffEqConstantCache + c1::T2 + c2::T2 + c3::T2 + c4::T2 + c5::T2 + c6::T2 + c7::T2 + c8::T2 + a21::T + a31::T + a32::T + a41::T + a42::T + a43::T + a51::T + a52::T + a53::T + a54::T + a61::T + a62::T + a63::T + a64::T + a65::T + a71::T + a72::T + a73::T + a74::T + a75::T + a76::T + a81::T + a82::T + a83::T + a84::T + a85::T + a86::T + a87::T + abar21::T + abar31::T + abar32::T + abar41::T + abar42::T + abar43::T + abar51::T + abar52::T + abar53::T + abar54::T + abar61::T + abar62::T + abar63::T + abar64::T + abar65::T + abar71::T + abar72::T + abar73::T + abar74::T + abar75::T + abar76::T + abar81::T + abar82::T + abar83::T + abar84::T + abar85::T + abar86::T + abar87::T + b1::T + b2::T + b3::T + b4::T + b5::T + b6::T + b7::T + b8::T + bp1::T + bp2::T + bp3::T + bp4::T + bp5::T + bp6::T + bp7::T + bp8::T + #bhat1::T + #bhat2::T + #bhat3::T + #bhat4::T + #bhat5::T + #bhat6::T + #bhat7::T + #bhat8::T + #bphat1::T + #bphat2::T + #bphat3::T + #bphat4::T + #bphat5::T + #bphat6::T + #bphat7::T + #bphat8::T + btilde1::T + btilde2::T + btilde3::T + btilde4::T + btilde5::T + btilde6::T + btilde7::T + btilde8::T + bptilde1::T + bptilde2::T + bptilde3::T + bptilde4::T + bptilde5::T + bptilde6::T + bptilde7::T + bptilde8::T +end + +function SharpFineRKN6ConstantCache(T::Type, T2::Type) + c1 = convert(T2, 1 // 1) + c2 = convert(T2, 1 // 10) + c3 = convert(T2, 2 // 9) + c4 = convert(T2, 3 // 7) + c5 = convert(T2, 2 // 3) + c6 = convert(T2, 4 // 5) + c7 = convert(T2, 1 // 1) + c8 = convert(T2, 1 // 1) + abar21 = convert(T, 1 // 10) + abar31 = convert(T, -2 // 81) + abar32 = convert(T, 20 // 81) + abar41 = convert(T, 615 // 1372) + abar42 = convert(T, -270 // 343) + abar43 = convert(T, 1053 // 1372) + abar51 = convert(T, 140 // 297) + abar52 = convert(T, -20 // 33) + abar53 = convert(T, 42 // 143) + abar54 = convert(T, 1960 // 3861) + abar61 = convert(T, -15544 // 20625) + abar62 = convert(T, 72 // 55) + abar63 = convert(T, 1053 // 6875) + abar64 = convert(T, -40768 // 103125) + abar65 = convert(T, 1521 // 3125) + abar71 = convert(T, 6841 // 1584) + abar72 = convert(T, -60 // 11) + abar73 = convert(T, -15291 // 7436) + abar74 = convert(T, 207319 // 33462) + abar75 = convert(T, -27 // 8) + abar76 = convert(T, 11125 // 8112) + abar81 = convert(T, 207707 // 54432) + abar82 = convert(T, -305 // 63) + abar83 = convert(T, -24163 // 14196) + abar84 = convert(T, 4448227//821340) + abar85 = convert(T, -4939 // 1680) + abar86 = convert(T, 3837625 // 3066336) + abar87 = convert(T, 0 // 1) + a21 = convert(T, 1 // 200) + a31 = convert(T, 14 // 2187) + a32 = convert(T, 40 // 2187) + a41 = convert(T, 148 // 3087) + a42 = convert(T, -85 // 3087) + 
a43 = convert(T, 1 // 14) + a51 = convert(T, -2207 // 28350) + a52 = convert(T, 932 // 2835) + a53 = convert(T, -7 // 50) + a54 = convert(T, 1 // 9) + a61 = convert(T, 13198826 // 54140625) + a62 = convert(T, -5602364 // 10828125) + a63 = convert(T, 27987101 // 44687500) + a64 = convert(T, -332539 // 4021872) + a65 = convert(T, 1 // 20) + a71 = convert(T, -601416947 // 162162000) + a72 = convert(T, 2972539 // 810810) + a73 = convert(T, 10883471 // 2574000) + a74 = convert(T, -503477 // 99000) + a75 = convert(T, 3 // 5) + a76 = convert(T, 4 // 5) + a81 = convert(T, -228527046421 // 72442188000) + a82 = convert(T, 445808287 // 139311900) + a83 = convert(T, 104724572891 // 298967760000) + a84 = convert(T, -31680158501 // 747419400) + a85 = convert(T, 1033813 // 2044224) + a86 = convert(T, 1166143 // 1703520) + a87 = convert(T, 0 // 1) + b1 = convert(T, 23 // 320) + b2 = convert(T, 0 // 1) + b3 = convert(T, 12393 // 54080) + b4 = convert(T, 2401 // 25350) + b5 = convert(T, 99 // 1600) + b6 = convert(T, 1375 // 32448) + b7 = convert(T, 0 // 1) + b8 = convert(T, 0 // 1) + bp1 = convert(T, 23 // 320) + bp2 = convert(T, 0 // 1) + bp3 = convert(T, 111537 // 378560) + bp4 = convert(T, 16807 // 101400) + bp5 = convert(T, 297 // 1600) + bp6 = convert(T, 6875 // 32448) + bp7 = convert(T, -319 // 840) + bp8 = convert(T, 9 // 20) + #bhat1 = convert(T, 22151 // 202500) + #bhat2 = convert(T, 0 // 1) + #bhat3 = convert(T, 64521 // 910000) + #bhat4 = convert(T, 8536927 // 26325000) + #bhat5 = convert(T, -16429 // 150000) + #bhat6 = convert(T, 1 // 10) + #bhat7 = convert(T, -3971 // 37800) + #bhat8 = convert(T, 11 // 100) + #bphat1 = convert(T, 241 // 2880) + #bphat2 = convert(T, 0 // 1) + #bphat3 = convert(T, 12393 // 54080) + #bphat4 = convert(T, 45619 // 152100) + #bphat5 = convert(T, -9 // 1600) + #bphat6 = convert(T, 11125 // 32448) + #bphat7 = convert(T, 1 // 20) + #bphat8 = convert(T, 0 // 1) + # btilde = b - bhat + btilde1 = convert(T, -121541 // 3240000) + btilde2 = convert(T, 0 // 1) + btilde3 = convert(T, 7488783 // 47320000) + btilde4 = convert(T, -78566551 // 342225000) + btilde5 = convert(T, 102841 // 600000) + btilde6 = convert(T, -9349 // 162240) + btilde7 = convert(T, 3971 // 37800) + btilde8 = convert(T, -11 // 100) + bptilde1 = convert(T, -17 // 1440) + bptilde2 = convert(T, 0 // 1) + bptilde3 = convert(T, 12393 // 189280) + bptilde4 = convert(T, -40817 // 304200) + bptilde5 = convert(T, 153 // 800) + bptilde6 = convert(T, -2125 // 16224) + bptilde7 = convert(T, -361 // 840) + bptilde8 = convert(T, 9 // 20) + SharpFineRKN6ConstantCache(c1, c2, c3, c4, c5, c6, c7, c8, + a21, a31, a32, a41, a42, a43, a51, a52, a53, a54, a61, a62, a63, a64, a65, a71, a72, a73, a74, a75, a76, a81, a82, a83, a84, a85, a86, a87, + abar21, abar31, abar32, abar41, abar42, abar43, abar51, abar52, abar53, abar54, abar61, abar62, abar63, abar64, abar65, + abar71, abar72, abar73, abar74, abar75, abar76, abar81, abar82, abar83, abar84, abar85, abar86, abar87, + b1, b2, b3, b4, b5, b6, b7, b8, bp1, bp2, bp3, bp4, bp5, bp6, bp7, bp8, + btilde1, btilde2, btilde3, btilde4, btilde5, btilde6, btilde7, btilde8, + bptilde1, bptilde2, bptilde3, bptilde4, bptilde5, bptilde6, bptilde7, bptilde8) +end + struct IRKN3ConstantCache{T, T2} <: OrdinaryDiffEqConstantCache bconst1::T bconst2::T
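
Reviewer note (not part of the patch): a minimal usage sketch of the newly exported solver, assuming the existing `SecondOrderODEProblem`/`solve` interface used by the other RKN methods. The acceleration function, initial data, and step size below are illustrative only. Because the embedded error estimate is still commented out in this draft, the sketch runs with a fixed step.

```julia
using OrdinaryDiffEq

# General second-order ODE u'' = f(u', u, p, t); here the acceleration
# depends on the velocity (u'' = -u - 0.1u'), the case SharpFineRKN6 targets.
function accel!(ddu, du, u, p, t)
    @. ddu = -u - 0.1 * du
end

du0 = [0.0]            # initial velocity (illustrative)
u0 = [1.0]             # initial position (illustrative)
tspan = (0.0, 10.0)
prob = SecondOrderODEProblem(accel!, du0, u0, tspan)

# Error estimation is commented out in this draft, so use a fixed step.
sol = solve(prob, SharpFineRKN6(), dt = 0.01, adaptive = false)

# Each state is an ArrayPartition: x[1] holds the velocity, x[2] the position.
sol.u[end].x[2]
```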