diff --git a/GaussTuranExampleTemp/example.jl b/GaussTuranExampleTemp/example.jl
new file mode 100644
index 0000000..3a8a629
--- /dev/null
+++ b/GaussTuranExampleTemp/example.jl
@@ -0,0 +1,63 @@
+using Optim
+using PreallocationTools
+using Integrals
+using DoubleFloats
+
+include("high_order_derivatives.jl")
+
+# This fix is needed to avoid crashing: https://github.com/JuliaLang/julia/pull/54201
+include("linear_algebra_fix.jl")
+
+FT = Double64
+n = 5
+s = 1
+a = FT(0.0)
+b = FT(1.0)
+
+"""
+    Example functions ϕⱼ from [1]
+"""
+function f(x::T, j::Int)::T where {T<:Number}
+    pow = if j % 2 == 0
+        j / 2 - 1
+    else
+        (j - 1) / 2 - 1 / 3
+    end
+    x^pow
+end
+
+deriv! = @generate_derivs(2)
+I = Integrals.GaussTuran(
+    f,
+    deriv!,
+    a,
+    b,
+    n,
+    s;
+    optimization_options = Optim.Options(;
+        x_abstol = FT(1e-250),
+        g_tol = FT(1e-250),
+        show_trace = true,
+        show_every = 100,
+        iterations = 10_000,
+    ),
+)
+
+# Integration error |I(fⱼ) - ₐ∫ᵇfⱼ(x)dx| for the first N functions fⱼ
+max_int_error_upper =
+    maximum(abs(I(x -> f(x, j)) - I.cache.rhs_upper[j]) for j = 1:I.cache.N)
+@show max_int_error_upper
+# max_int_error_upper = 3.0814879110195774e-32
+
+# Integration error |I(fⱼ) - ₐ∫ᵇfⱼ(x)dx| for the last n functions fⱼ
+max_int_error_lower = maximum(
+    abs(I(x -> f(x, j)) - I.cache.rhs_lower[j-I.cache.N]) for
+    j = (I.cache.N+1):(I.cache.N+I.cache.n)
+)
+@show max_int_error_lower
+# max_int_error_lower = 2.8858134286698342e-30
+
+# Example with eˣ
+exp_error = abs(I(Base.exp) - (Base.exp(1) - 1))
+@show exp_error;
+# exp_error = -6.436621416953051e-18
\ No newline at end of file
diff --git a/GaussTuranExampleTemp/high_order_derivatives.jl b/GaussTuranExampleTemp/high_order_derivatives.jl
new file mode 100644
index 0000000..8de081a
--- /dev/null
+++ b/GaussTuranExampleTemp/high_order_derivatives.jl
@@ -0,0 +1,57 @@
+using ForwardDiff
+
+"""
+    Generate a function `derivs!(out, f, x)` which computes the 0th up to the `max_order`-th derivative
+    of the scalar-to-scalar function f at x and stores them in `out` in ascending order of derivative.
+    Hence `out` must have length at least `max_order + 1`.
+"""
+macro generate_derivs(max_order::Int)
+    # Create nested dual number of required depth (arg_0, …, arg_{max_order})
+    arg_assignments = [:(arg_0 = x)]
+    for i = 1:max_order
+        arg_name = Symbol("arg_$i")
+        prev_arg_name = Symbol("arg_$(i-1)")
+        push!(
+            arg_assignments,
+            :($arg_name = ForwardDiff.Dual{Val{$i}}($prev_arg_name, one($prev_arg_name))),
+        )
+    end
+
+    # Unpack the results
+    arg_max = Symbol("arg_$max_order")
+    res_unpacks = [:(res_0 = f($arg_max))]
+    for i = 1:max_order
+        res_name = Symbol("res_$i")
+        res_prev_name = Symbol("res_$(i-1)")
+        push!(res_unpacks, :($res_name = only($res_prev_name.partials)))
+    end
+
+    # Assign the results
+    out_assignments = Expr[]
+    for i = 0:max_order
+        res = Symbol("res_$i")
+        res_temp = Symbol("$(res)_temp_0")
+        push!(out_assignments, :($res_temp = $res))
+        # Create temporary variables to get to
+        # res_{i}.value.value.value…
+        for j = 1:(max_order-i)
+            res_temp = Symbol("$(res)_temp_$j")
+            res_temp_prev = Symbol("$(res)_temp_$(j-1)")
+            push!(out_assignments, :($res_temp = $res_temp_prev.value))
+        end
+        res_temp = Symbol("$(res)_temp_$(max_order - i)")
+        push!(out_assignments, :(out[$(i + 1)] = $res_temp))
+    end
+
+    # Construct the complete function definition
+    func_def = quote
+        function derivs!(out, f, x::T)::Nothing where {T<:Number}
+            $(arg_assignments...)
+            $(res_unpacks...)
+            $(out_assignments...)
+ return nothing + end + end + + return func_def +end \ No newline at end of file diff --git a/GaussTuranExampleTemp/linear_algebra_fix.jl b/GaussTuranExampleTemp/linear_algebra_fix.jl new file mode 100644 index 0000000..1055b8e --- /dev/null +++ b/GaussTuranExampleTemp/linear_algebra_fix.jl @@ -0,0 +1,292 @@ +using LinearAlgebra +using LinearAlgebra: AdjOrTrans, require_one_based_indexing, _ustrip + +# manually hoisting b[j] significantly improves performance as of Dec 2015 +# manually eliding bounds checking significantly improves performance as of Dec 2015 +# replacing repeated references to A.data[j,j] with [Ajj = A.data[j,j] and references to Ajj] +# does not significantly impact performance as of Dec 2015 +# in the transpose and conjugate transpose naive substitution variants, +# accumulating in z rather than b[j,k] significantly improves performance as of Dec 2015 +function LinearAlgebra.generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractVecOrMat) + require_one_based_indexing(C, A, B) + mA, nA = size(A) + m, n = size(B, 1), size(B,2) + if nA != m + throw(DimensionMismatch(lazy"second dimension of left hand side A, $nA, and first dimension of right hand side B, $m, must be equal")) + end + if size(C) != size(B) + throw(DimensionMismatch(lazy"size of output, $(size(C)), does not match size of right hand side, $(size(B))")) + end + iszero(mA) && return C + oA = oneunit(eltype(A)) + @inbounds if uploc == 'U' + if isunitc == 'N' + if tfun === identity + for k in 1:n + amm = A[m,m] + iszero(amm) && throw(SingularException(m)) + Cm = C[m,k] = amm \ B[m,k] + # fill C-column + for i in m-1:-1:1 + C[i,k] = oA \ B[i,k] - _ustrip(A[i,m]) * Cm + end + for j in m-1:-1:1 + ajj = A[j,j] + iszero(ajj) && throw(SingularException(j)) + Cj = C[j,k] = _ustrip(ajj) \ C[j,k] + for i in j-1:-1:1 + C[i,k] -= _ustrip(A[i,j]) * Cj + end + end + end + else # tfun in (adjoint, transpose) + for k in 1:n + for j in 1:m + ajj = A[j,j] + iszero(ajj) && throw(SingularException(j)) + Bj = B[j,k] + for i in 1:j-1 + Bj -= tfun(A[i,j]) * C[i,k] + end + C[j,k] = tfun(ajj) \ Bj + end + end + end + else # isunitc == 'U' + if tfun === identity + for k in 1:n + Cm = C[m,k] = oA \ B[m,k] + # fill C-column + for i in m-1:-1:1 + C[i,k] = oA \ B[i,k] - _ustrip(A[i,m]) * Cm + end + for j in m-1:-1:1 + Cj = C[j,k] + for i in 1:j-1 + C[i,k] -= _ustrip(A[i,j]) * Cj + end + end + end + else # tfun in (adjoint, transpose) + for k in 1:n + for j in 1:m + Bj = B[j,k] + for i in 1:j-1 + Bj -= tfun(A[i,j]) * C[i,k] + end + C[j,k] = oA \ Bj + end + end + end + end + else # uploc == 'L' + if isunitc == 'N' + if tfun === identity + for k in 1:n + a11 = A[1,1] + iszero(a11) && throw(SingularException(1)) + C1 = C[1,k] = a11 \ B[1,k] + # fill C-column + for i in 2:m + C[i,k] = oA \ B[i,k] - _ustrip(A[i,1]) * C1 + end + for j in 2:m + ajj = A[j,j] + iszero(ajj) && throw(SingularException(j)) + Cj = C[j,k] = _ustrip(ajj) \ C[j,k] + for i in j+1:m + C[i,k] -= _ustrip(A[i,j]) * Cj + end + end + end + else # tfun in (adjoint, transpose) + for k in 1:n + for j in m:-1:1 + ajj = A[j,j] + iszero(ajj) && throw(SingularException(j)) + Bj = B[j,k] + for i in j+1:m + Bj -= tfun(A[i,j]) * C[i,k] + end + C[j,k] = tfun(ajj) \ Bj + end + end + end + else # isunitc == 'U' + if tfun === identity + for k in 1:n + C1 = C[1,k] = oA \ B[1,k] + # fill C-column + for i in 2:m + C[i,k] = oA \ B[i,k] - _ustrip(A[i,1]) * C1 + end + for j in 2:m + Cj = C[j,k] + for i in j+1:m + C[i,k] -= _ustrip(A[i,j]) * Cj + end + end 
+ end + else # tfun in (adjoint, transpose) + for k in 1:n + for j in m:-1:1 + Bj = B[j,k] + for i in j+1:m + Bj -= tfun(A[i,j]) * C[i,k] + end + C[j,k] = oA \ Bj + end + end + end + end + end + return C +end +# conjugate cases +function LinearAlgebra.generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, ::Function, xA::AdjOrTrans, B::AbstractVecOrMat) + A = parent(xA) + require_one_based_indexing(C, A, B) + mA, nA = size(A) + m, n = size(B, 1), size(B,2) + if nA != m + throw(DimensionMismatch(lazy"second dimension of left hand side A, $nA, and first dimension of right hand side B, $m, must be equal")) + end + if size(C) != size(B) + throw(DimensionMismatch(lazy"size of output, $(size(C)), does not match size of right hand side, $(size(B))")) + end + iszero(mA) && return C + oA = oneunit(eltype(A)) + @inbounds if uploc == 'U' + if isunitc == 'N' + for k in 1:n + amm = conj(A[m,m]) + iszero(amm) && throw(SingularException(m)) + Cm = C[m,k] = amm \ B[m,k] + # fill C-column + for i in m-1:-1:1 + C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,m])) * Cm + end + for j in m-1:-1:1 + ajj = conj(A[j,j]) + iszero(ajj) && throw(SingularException(j)) + Cj = C[j,k] = _ustrip(ajj) \ C[j,k] + for i in j-1:-1:1 + C[i,k] -= _ustrip(conj(A[i,j])) * Cj + end + end + end + else # isunitc == 'U' + for k in 1:n + Cm = C[m,k] = oA \ B[m,k] + # fill C-column + for i in m-1:-1:1 + C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,m])) * Cm + end + for j in m-1:-1:1 + Cj = C[j,k] + for i in 1:j-1 + C[i,k] -= _ustrip(conj(A[i,j])) * Cj + end + end + end + end + else # uploc == 'L' + if isunitc == 'N' + for k in 1:n + a11 = conj(A[1,1]) + iszero(a11) && throw(SingularException(1)) + C1 = C[1,k] = a11 \ B[1,k] + # fill C-column + for i in 2:m + C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,1])) * C1 + end + for j in 2:m + ajj = conj(A[j,j]) + iszero(ajj) && throw(SingularException(j)) + Cj = C[j,k] = _ustrip(ajj) \ C[j,k] + for i in j+1:m + C[i,k] -= _ustrip(conj(A[i,j])) * Cj + end + end + end + else # isunitc == 'U' + for k in 1:n + C1 = C[1,k] = oA \ B[1,k] + # fill C-column + for i in 2:m + C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,1])) * C1 + end + for j in 1:m + Cj = C[j,k] + for i in j+1:m + C[i,k] -= _ustrip(conj(A[i,j])) * Cj + end + end + end + end + end + return C +end + +function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractMatrix) + require_one_based_indexing(C, A, B) + m, n = size(A) + if size(B, 1) != n + throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $(size(B,1))")) + end + if size(C) != size(A) + throw(DimensionMismatch(lazy"size of output, $(size(C)), does not match size of left hand side, $(size(A))")) + end + oB = oneunit(eltype(B)) + unit = isunitc == 'U' + @inbounds if uploc == 'U' + if tfun === identity + for i in 1:m + for j in 1:n + Aij = A[i,j] + for k in 1:j - 1 + Aij -= C[i,k]*B[k,j] + end + unit || (iszero(B[j,j]) && throw(SingularException(j))) + C[i,j] = Aij / (unit ? oB : B[j,j]) + end + end + else # tfun in (adjoint, transpose) + for i in 1:m + for j in n:-1:1 + Aij = A[i,j] + for k in j + 1:n + Aij -= C[i,k]*tfun(B[j,k]) + end + unit || (iszero(B[j,j]) && throw(SingularException(j))) + C[i,j] = Aij / (unit ? oB : tfun(B[j,j])) + end + end + end + else # uploc == 'L' + if tfun === identity + for i in 1:m + for j in n:-1:1 + Aij = A[i,j] + for k in j + 1:n + Aij -= C[i,k]*B[k,j] + end + unit || (iszero(B[j,j]) && throw(SingularException(j))) + C[i,j] = Aij / (unit ? 
oB : B[j,j]) + end + end + else # tfun in (adjoint, transpose) + for i in 1:m + for j in 1:n + Aij = A[i,j] + for k in 1:j - 1 + Aij -= C[i,k]*tfun(B[j,k]) + end + unit || (iszero(B[j,j]) && throw(SingularException(j))) + C[i,j] = Aij / (unit ? oB : tfun(B[j,j])) + end + end + end + end + return C +end \ No newline at end of file
diff --git a/Project.toml b/Project.toml
index d281ac3..1de41f8 100644
--- a/Project.toml
+++ b/Project.toml
@@ -21,6 +21,8 @@ Cubature = "667455a9-e2ce-5579-9412-b964f529a492"
 FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 MCIntegration = "ea1e2de9-7db7-4b42-91ee-0cd1bf6df167"
+Optim = "429524aa-4258-5aef-a3af-852621145aeb"
+PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [extensions]
@@ -29,6 +31,7 @@ IntegralsCubaExt = "Cuba"
 IntegralsCubatureExt = "Cubature"
 IntegralsFastGaussQuadratureExt = "FastGaussQuadrature"
 IntegralsForwardDiffExt = "ForwardDiff"
+IntegralsOptimExt = ["Optim", "PreallocationTools", "ForwardDiff"]
 IntegralsMCIntegrationExt = "MCIntegration"
 IntegralsZygoteExt = ["Zygote", "ChainRulesCore"]
 
diff --git a/ext/IntegralsOptimExt.jl b/ext/IntegralsOptimExt.jl
new file mode 100644
index 0000000..520ad24
--- /dev/null
+++ b/ext/IntegralsOptimExt.jl
@@ -0,0 +1,286 @@
+
+module IntegralsOptimExt
+using Base.Threads
+using Integrals
+if isdefined(Base, :get_extension)
+    using Optim
+    using ForwardDiff
+    using PreallocationTools
+else
+    using ..Optim
+    using ..ForwardDiff
+    using ..PreallocationTools
+end
+
+########################################################
+## Computing Gauss-Turán quadrature rules
+
+function DEFAULT_w(x::T)::T where {T}
+    one(T)
+end
+
+"""
+Cached data for the `GaussTuranLoss!` call.
+"""
+struct GaussTuranCache{T}
+    n::Int
+    s::Int
+    N::Int
+    a::T
+    b::T
+    ε::T
+    rhs_upper::Vector{T}
+    rhs_lower::Vector{T}
+    M_upper_buffer::LazyBufferCache{typeof(identity)}
+    M_lower_buffer::LazyBufferCache{typeof(identity)}
+    X_buffer::LazyBufferCache{typeof(identity)}
+    A_buffer::LazyBufferCache{typeof(identity)}
+    function GaussTuranCache(
+        n,
+        s,
+        N,
+        a::T,
+        b::T,
+        ε::T,
+        rhs_upper::Vector{T},
+        rhs_lower::Vector{T},
+    ) where {T}
+        new{T}(
+            n,
+            s,
+            N,
+            a,
+            b,
+            ε,
+            rhs_upper,
+            rhs_lower,
+            LazyBufferCache(),
+            LazyBufferCache(),
+            LazyBufferCache(),
+            LazyBufferCache(),
+        )
+    end
+end
+
+"""
+Function whose root defines the quadrature rule.
+"""
+function GaussTuranLoss!(f!, ΔX, cache)
+    (;
+        n,
+        s,
+        N,
+        a,
+        rhs_upper,
+        rhs_lower,
+        M_upper_buffer,
+        M_lower_buffer,
+        A_buffer,
+        X_buffer,
+    ) = cache
+    M_upper = M_upper_buffer[ΔX, (N, N)]
+    M_lower = M_lower_buffer[ΔX, (n, N)]
+    A = A_buffer[ΔX, N]
+    X = X_buffer[ΔX, n]
+
+    # Compute X from ΔX
+    cumsum!(X, ΔX)
+    X .+= a
+
+    # Evaluating f!
+    for (i, x) in enumerate(X)
+        Threads.@threads for j = 1:N
+            f!(view(M_upper, j, i:n:N), x, j)
+        end
+        Threads.@threads for j = (N+1):(N+n)
+            f!(view(M_lower, j - N, i:n:N), x, j)
+        end
+    end
+
+    # Solving for A
+    A .= M_upper \ rhs_upper
+
+    # Computing output
+    out = zero(eltype(ΔX))
+    for i in eachindex(X)
+        out_term = -rhs_lower[i]
+        for j in eachindex(A)
+            out_term += A[j] * M_lower[i, j]
+        end
+        out += out_term^2
+    end
+    sqrt(out)
+end
+
+"""
+    Callable result object of the Gauss-Turán quadrature rule
+    computation algorithm.
+""" +struct GaussTuranResult{T,RType,dfType,deriv!Type} + X::Vector{T} + A::Matrix{T} + res::RType + cache::GaussTuranCache + df::dfType + deriv!::deriv!Type + f_tmp::Vector{T} + + function GaussTuranResult(res, cache::GaussTuranCache{T}, df, deriv!) where {T} + (; A_buffer, s, n, N, a) = cache + X = cumsum(res.minimizer) .+ a + df.f(res.minimizer) + A = reshape(A_buffer[T[], N], (n, 2s + 1)) + f_tmp = zeros(T, 2s + 1) + new{T,typeof(res),typeof(df),typeof(deriv!)}(X, A, res, cache, df, deriv!, f_tmp) + end +end + +""" + Input: function f(x, d) which gives the dth derivative of f +""" +function (I::GaussTuranResult{T} where {T})(integrand) + (; X, A, cache, deriv!, f_tmp) = I + out = zero(eltype(X)) + for (i, x) in enumerate(X) + deriv!(f_tmp, integrand, x) + for m = 1:(2*cache.s+1) + out += A[i, m] * f_tmp[m] + end + end + out +end + +""" + GaussTuran(f!, a, b, n, s; w = DEFAULT_w, ε = nothing, X₀ = nothing) + +Determine a quadrature rule + +I(g) = ∑ₘ∑ᵢ Aₘᵢ * ∂ᵐ⁻¹g(xᵢ) (m = 1, … 2s + 1, i = 1, …, n) + +that gives the precise integral ₐ∫ᵇf(x)dx for given linearly independent functions f₁, f₂, …, f₂₍ₛ₊₁₎ₙ. + +Method: + +The equations + +∑ₘ∑ᵢ Aₘᵢ * ∂ᵐ⁻¹fⱼ(xᵢ) = ₐ∫ᵇw(x)fⱼ(x)dx j = 1, …, 2(s+1)n + +define an overdetermined linear system M(X)A = b in the weights Aₘᵢ for a given X = (x₁, x₂, …, xₙ). +We split the matrix M into a square upper part M_upper of size (2s+1)n x (2s+1)n and a lower part M_lower of size n x (2s+1)n, +and the right hand size b analogously. From this we obtain A = M_upper⁻¹ * b_upper. Then we can asses the correctness of X by comparing +M_lower * A to b_lower, i.e. how well the last n equations holds. This yields the loss function + +loss(X) = ||M_lower * A - b_lower||₂ = ||M_lower * M_upper⁻¹ * b_upper - b_lower||₂. + +We have the constraints that we want X to be ordered and in the interval (a, b). To achieve this, we formulate the loss +in terms of ΔX = (Δx₁, Δx₂, …, Δxₙ) = (x₁ - a, x₂ - x₁, …, xₙ - xₙ₋₁) on which we set the constraints + +ε ≤ Δxᵢ ≤ b - a - 2 * ε i = 1, …, n +nε ≤ a + ∑ΔX ≤ b - a - ε + +where ε is an enforced minimum distance between the nodes. This prevents that consecutive nodes converge towards eachother making +M_upper singular. + +## Inputs + + - `f`: Function with signature `f(x::T, j)::T` that returns the d-th derivative of fⱼ at x + - `deriv!`: Function generated with @generate_derivs(2s) for automatically computing the derivatives of the fⱼ + - `a`: Integration lower bound + - `b`: Integration upper bound + - `n`: The number of nodes in the quadrature rule + - `s`: Determines the highest order derivative required from the functions fⱼ, currently 2(s + 1) + +## Keyword Arguments + + - `w`: the integrand weighting function, must have signature w(x::Number)::Number. Defaults to `w(x) = 1`. + - `ε`: the minimum distance between nodes. Defaults to 1e-3 * (b - a) / (n + 1). + - `X₀`: The initial guess for the nodes. Defaults to uniformly distributed over (a, b). + - `integration_kwargs`: The key word arguments passed to `solve` for integrating w * fⱼ + - `optimization_kwargs`: The key word arguments passed to `Optim.Options` for the minization problem + for finding X. 
+""" +function Integrals.GaussTuran( + f, + deriv!, + a::T, + b::T, + n, + s; + w = DEFAULT_w, + ε = nothing, + X₀ = nothing, + integration_kwargs::NamedTuple = (; reltol = 1e-120), + optimization_options::Optim.Options = Optim.Options(), +) where {T<:AbstractFloat} + # Initial guess + if isnothing(X₀) + X₀ = collect(range(a, b, length = n + 2)[2:(end-1)]) + else + @assert length(X₀) == n + end + ΔX₀ = diff(X₀) + pushfirst!(ΔX₀, X₀[1] - a) + + # Minimum distance between nodes + if isnothing(ε) + ε = 1e-3 * (b - a) / (n + 1) + else + @assert 0 < ε ≤ (b - a) / (n + 1) + end + ε = T(ε) + + # Integrate w * f + integrand = (out, x, j) -> out[] = w(x) * f(x, j) + function integrate(j) + prob = IntegralProblem{true}(integrand, (a, b), j) + res = solve(prob, QuadGKJL(); integration_kwargs...) + res.u[] + end + N = (2s + 1) * n + rhs_upper = [integrate(j) for j = 1:N] + rhs_lower = [integrate(j) for j = (N+1):(N+n)] + + # Solving constrained non linear problem for ΔX, see + # https://julianlsolvers.github.io/Optim.jl/stable/examples/generated/ipnewton_basics/ + + # The cache for evaluating GaussTuranLoss + cache = GaussTuranCache(n, s, N, a, b, ε, rhs_upper, rhs_lower) + + # In-place derivative computation of f + function f!(out, x, j::Int)::Nothing + deriv!(out, x -> f(x, j), x) + end + + # The function whose root defines the quadrature rule + # Note: the optimization method requires a Hessian, + # which brings the highest order derivative required to 2s + 2 + func(ΔX) = GaussTuranLoss!(f!, ΔX, cache) + df = TwiceDifferentiable(func, ΔX₀; autodiff = :forward) + + # The constraints on ΔX + ΔX_lb = fill(ε, length(ΔX₀)) + ΔX_ub = fill(b - a - 2 * ε, length(ΔX₀)) + + # Defining the variable and constraints nε ≤ a + ∑ΔX ≤ b - a - ε + sum_variable!(c, ΔX) = (c[1] = a + sum(ΔX); c) + sum_jacobian!(J, ΔX) = (J[1, :] .= one(eltype(ΔX)); J) + sum_hessian!(H, ΔX, λ) = nothing + sum_lb = [n * ε] + sum_ub = [b - a - ε] + constraints = TwiceDifferentiableConstraints( + sum_variable!, + sum_jacobian!, + sum_hessian!, + ΔX_lb, + ΔX_ub, + sum_lb, + sum_ub, + ) + + # Solve for the quadrature rule by minimizing the loss function + res = Optim.optimize(df, constraints, T.(ΔX₀), IPNewton(), optimization_options) + + GaussTuranResult(res, cache, df, deriv!) +end + +end # module IntegralsOptimExt \ No newline at end of file diff --git a/src/Integrals.jl b/src/Integrals.jl index d11fab9..715b0f1 100644 --- a/src/Integrals.jl +++ b/src/Integrals.jl @@ -20,6 +20,8 @@ include("sampled.jl") include("trapezoidal.jl") include("simpsons.jl") +function GaussTuran end + abstract type QuadSensitivityAlg end struct ReCallVJP{V} vjp::V