diff --git a/Project.toml b/Project.toml
index bda050e6..5dc7cc58 100644
--- a/Project.toml
+++ b/Project.toml
@@ -11,6 +11,7 @@ FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
 Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
 GeometricFlux = "7e08b658-56d3-11e9-2997-919d5b31e4ea"
 KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Polynomials = "f27b6e38-b328-58d1-80ce-0feddd5e7a45"
 SpecialPolynomials = "a25cea48-d430-424a-8ee7-0d3ad3742e9e"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
diff --git a/src/NeuralOperators.jl b/src/NeuralOperators.jl
index 00c64175..aafa0fb7 100644
--- a/src/NeuralOperators.jl
+++ b/src/NeuralOperators.jl
@@ -12,6 +12,7 @@ using GeometricFlux
 using Statistics
 using Polynomials
 using SpecialPolynomials
+using LinearAlgebra
 
 include("abstracttypes.jl")
 
diff --git a/src/Transform/polynomials.jl b/src/Transform/polynomials.jl
index 8bee986b..a7d26704 100644
--- a/src/Transform/polynomials.jl
+++ b/src/Transform/polynomials.jl
@@ -128,22 +128,26 @@ function chebyshev_ϕ_ψ(k)
 end
 
 function legendre_filter(k)
-    H0 = zeros(k, k)legendre
+    H0 = zeros(k, k)
     H1 = zeros(k, k)
     G0 = zeros(k, k)
     G1 = zeros(k, k)
     ϕ, ψ1, ψ2 = legendre_ϕ_ψ(k)
 
-    # roots = Poly(legendre(k, 2*x-1)).all_roots()
-    # x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64)
-    # wm = 1/k/legendreDer(k,2*x_m-1)/eval_legendre(k-1,2*x_m-1)
+    # Gauss–Legendre quadrature nodes x_m on (0, 1) (roots of P_k(2x-1)) and weights wm
+    l = convert(Polynomial, gen_poly(Legendre, k))
+    x_m = roots(l(Polynomial([-1, 2])))  # 2x-1
+    m = 2 .* x_m .- 1
+    wm = 1 ./ k ./ legendre_der.(k, m) ./ gen_poly(Legendre, k-1).(m)
 
-    # for ki in range(k):
-    #     for kpi in range(k):
-    #         H0[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki](x_m/2) * phi[kpi](x_m)).sum()
-    #         G0[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, x_m/2) * phi[kpi](x_m)).sum()
-    #         H1[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki]((x_m+1)/2) * phi[kpi](x_m)).sum()
-    #         G1[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, (x_m+1)/2) * phi[kpi](x_m)).sum()
+    for ki in 0:(k-1)
+        for kpi in 0:(k-1)
+            H0[ki+1, kpi+1] = 1/sqrt(2) * sum(wm .* ϕ[ki+1].(x_m/2) .* ϕ[kpi+1].(x_m))
+            G0[ki+1, kpi+1] = 1/sqrt(2) * sum(wm .* ψ(ψ1, ψ2, ki, x_m/2) .* ϕ[kpi+1].(x_m))
+            H1[ki+1, kpi+1] = 1/sqrt(2) * sum(wm .* ϕ[ki+1].((x_m.+1)/2) .* ϕ[kpi+1].(x_m))
+            G1[ki+1, kpi+1] = 1/sqrt(2) * sum(wm .* ψ(ψ1, ψ2, ki, (x_m.+1)/2) .* ϕ[kpi+1].(x_m))
+        end
+    end
 
     zero_out!(H0)
     zero_out!(H1)
diff --git a/src/Transform/utils.jl b/src/Transform/utils.jl
index ae64aa18..994fa75d 100644
--- a/src/Transform/utils.jl
+++ b/src/Transform/utils.jl
@@ -7,8 +7,9 @@ function ϕ_(ϕ_coefs; lb::Real=0., ub::Real=1.)
 end
 
 function ψ(ψ1, ψ2, i, inp)
-    mask = (inp ≤ 0.5) * 1.0
-    return ψ1[i](inp) * mask + ψ2[i](inp) * (1-mask)
+    # ψ1 applies where inp .≤ 0.5, ψ2 on the complement; i is 0-based, arrays are 1-based
+    mask = (inp .≤ 0.5) .* 1.0
+    return ψ1[i+1].(inp) .* mask .+ ψ2[i+1].(inp) .* (1 .- mask)
 end
 
 zero_out!(x; tol=1e-8) = (x[abs.(x) .< tol] .= 0)
@@ -35,3 +35,14 @@ function proj_factor(a, b; complement::Bool=false)
     proj_ = sum(prod_ ./ r .* s)
     return proj_
 end
+
+_legendre(k, x) = (2k+1) * gen_poly(Legendre, k)(x)
+
+# Derivative of the k-th Legendre polynomial via P′ₖ = Σⱼ (2j+1)Pⱼ, j = k-1, k-3, …, ≥ 0
+function legendre_der(k, x)
+    out = 0
+    for i in k-1:-2:0
+        out += _legendre(i, x)
+    end
+    return out
+end
diff --git a/test/polynomials.jl b/test/polynomials.jl
index 09fb11d5..1163b7ef 100644
--- a/test/polynomials.jl
+++ b/test/polynomials.jl
@@ -64,4 +64,46 @@
         @test ψ2[3](1) ≈ -1.0941547380212384
         @test ψ2[3](2) ≈ 0.
     end
+
+    @testset "legendre_filter" begin
+        H0, H1, G0, G1, Φ1, Φ2 = NeuralOperators.legendre_filter(3)
+
+        @test H0 ≈ [0.70710678  0.          0.       ;
+                    -0.61237244 0.35355339  0.       ;
+                    0.          -0.6846532  0.1767767]
+        @test H1 ≈ [0.70710678  0.          0.       ;
+                    0.61237244  0.35355339  0.       ;
+                    0.          0.6846532   0.1767767]
+        @test G0 ≈ [0.35355339  0.61237244  0.        ;
+                    0.          0.1767767   0.6846532 ;
+                    0.          0.          0.70710678]
+        @test G1 ≈ [-0.35355339 0.61237244  0.         ;
+                    0.          -0.1767767  0.6846532  ;
+                    0.          0.          -0.70710678]
+        @test Φ1 == I(3)
+        @test Φ2 == I(3)
+    end
+
+    @testset "chebyshev_filter" begin
+        # H0, H1, G0, G1, Φ1, Φ2 = NeuralOperators.chebyshev_filter(3)
+
+        # @test H0 ≈ [0.70710678  0.          0.       ;
+        #             -0.5        0.35355339  0.       ;
+        #             -0.25       -0.70710678 0.1767767]
+        # @test H1 ≈ [0.70710678  0.          0.       ;
+        #             0.5         0.35355339  0.       ;
+        #             -0.25       0.70710678  0.1767767]
+        # @test G0 ≈ [0.60944614  0.77940383  0.        ;
+        #             0.66325172  1.02726613  1.14270252;
+        #             0.61723435  0.90708619  1.1562954 ]
+        # @test G1 ≈ [-0.60944614 0.77940383  0.        ;
+        #             0.66325172  -1.02726613 1.14270252;
+        #             -0.61723435 0.90708619  -1.1562954 ]
+        # @test Φ1 ≈ [1.          -0.40715364 -0.21440101;
+        #             -0.40715364 0.84839559  -0.44820615;
+        #             -0.21440101 -0.44820615 0.84002127]
+        # @test Φ2 ≈ [1.          0.40715364  -0.21440101;
+        #             0.40715364  0.84839559  0.44820615 ;
+        #             -0.21440101 0.44820615  0.84002127 ]
+    end
 end
diff --git a/test/runtests.jl b/test/runtests.jl
index 90d4dcb2..e2d65a17 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -3,6 +3,7 @@ using CUDA
 using Flux
 using GeometricFlux
 using Graphs
+using LinearAlgebra
 using Polynomials
 using Zygote
 using Test