diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 4fd28dd07..49127a403 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -49,3 +49,54 @@ steps: GNN_TEST_AMDGPU: "true" GNN_TEST_CPU: "false" timeout_in_minutes: 60 + + - label: "GNNlib CUDA" + plugins: + - JuliaCI/julia#v1: + version: "1" + - JuliaCI/julia-coverage#v1: + dirs: + - GNNlib/src + command: | + julia --color=yes --depwarn=yes --project=GNNlib/test -e ' + import Pkg + dev_pkgs = Pkg.PackageSpec[] + for pkg in ("GNNGraphs", "GNNlib") + push!(dev_pkgs, Pkg.PackageSpec(path=pkg)); + end + Pkg.develop(dev_pkgs) + Pkg.add(["CUDA", "cuDNN"]) + Pkg.test("GNNlib")' + agents: + queue: "juliagpu" + cuda: "*" + env: + GNN_TEST_CUDA: "true" + GNN_TEST_CPU: "false" + timeout_in_minutes: 60 + + - label: "GNNlib AMDGPU" + plugins: + - JuliaCI/julia#v1: + version: "1" + - JuliaCI/julia-coverage#v1: + dirs: + - GNNlib/src + command: | + julia --color=yes --depwarn=yes --project=GNNlib/test -e ' + import Pkg + dev_pkgs = Pkg.PackageSpec[] + for pkg in ("GNNGraphs", "GNNlib") + push!(dev_pkgs, Pkg.PackageSpec(path=pkg)); + end + Pkg.develop(dev_pkgs) + Pkg.add(["AMDGPU"]) + Pkg.test("GNNlib")' + agents: + queue: "juliagpu" + rocm: "*" + rocmgpu: "*" + env: + GNN_TEST_AMDGPU: "true" + GNN_TEST_CPU: "false" + timeout_in_minutes: 60 diff --git a/GNNGraphs/src/GNNGraphs.jl b/GNNGraphs/src/GNNGraphs.jl index 5a5b5fe66..3054a9ab8 100644 --- a/GNNGraphs/src/GNNGraphs.jl +++ b/GNNGraphs/src/GNNGraphs.jl @@ -47,6 +47,7 @@ include("query.jl") export adjacency_list, edge_index, get_edge_weight, + get_graph_type, graph_indicator, has_multi_edges, is_directed, diff --git a/GNNGraphs/src/operators.jl b/GNNGraphs/src/operators.jl index 4fdd6ac87..1faa4adcb 100644 --- a/GNNGraphs/src/operators.jl +++ b/GNNGraphs/src/operators.jl @@ -6,8 +6,8 @@ Intersect two graphs by keeping only the common edges. """ function Base.intersect(g1::GNNGraph, g2::GNNGraph) @assert g1.num_nodes == g2.num_nodes - @assert graph_type_symbol(g1) == graph_type_symbol(g2) - graph_type = graph_type_symbol(g1) + @assert get_graph_type(g1) == get_graph_type(g2) + graph_type = get_graph_type(g1) num_nodes = g1.num_nodes idx1, _ = edge_encoding(edge_index(g1)..., num_nodes) diff --git a/GNNGraphs/src/query.jl b/GNNGraphs/src/query.jl index 18622af21..a11eb564c 100644 --- a/GNNGraphs/src/query.jl +++ b/GNNGraphs/src/query.jl @@ -80,9 +80,61 @@ function Graphs.has_edge(g::GNNHeteroGraph, edge_t::EType, i::Integer, j::Intege return any((s .== i) .& (t .== j)) end -graph_type_symbol(::GNNGraph{<:COO_T}) = :coo -graph_type_symbol(::GNNGraph{<:SPARSE_T}) = :sparse -graph_type_symbol(::GNNGraph{<:ADJMAT_T}) = :dense +""" + get_graph_type(g::GNNGraph) + +Return the underlying representation for the graph `g` as a symbol. + +Possible values are: +- `:coo`: Coordinate list representation. The graph is stored as a tuple of vectors `(s, t, w)`, + where `s` and `t` are the source and target nodes of the edges, and `w` is the vector of edge weights. +- `:sparse`: Sparse matrix representation. The graph is stored as a sparse matrix representing the weighted adjacency matrix. +- `:dense`: Dense matrix representation. The graph is stored as a dense matrix representing the weighted adjacency matrix. + +The default representation for graph constructors in GNNGraphs.jl is `:coo`. +The underlying representation can be accessed through the `g.graph` field. + +See also [`GNNGraph`](@ref). + +# Examples + +The default representation for graph constructors in GNNGraphs.jl is `:coo`. 
+```jldoctest +julia> g = rand_graph(5, 10) +GNNGraph: + num_nodes: 5 + num_edges: 10 + +julia> get_graph_type(g) +:coo +``` +The `GNNGraph` constructor can also be used to create graphs with different representations. +```jldoctest +julia> g = GNNGraph([2,3,5], [1,2,4], graph_type=:sparse) +GNNGraph: + num_nodes: 5 + num_edges: 3 + +julia> g.graph +5×5 SparseArrays.SparseMatrixCSC{Int64, Int64} with 3 stored entries: + ⋅ ⋅ ⋅ ⋅ ⋅ + 1 ⋅ ⋅ ⋅ ⋅ + ⋅ 1 ⋅ ⋅ ⋅ + ⋅ ⋅ ⋅ ⋅ ⋅ + ⋅ ⋅ ⋅ 1 ⋅ + +julia> get_graph_type(g) +:sparse + +julia> gcoo = GNNGraph(g, graph_type=:coo); + +julia> gcoo.graph +([2, 3, 5], [1, 2, 4], [1, 1, 1]) +``` +""" +get_graph_type(::GNNGraph{<:COO_T}) = :coo +get_graph_type(::GNNGraph{<:SPARSE_T}) = :sparse +get_graph_type(::GNNGraph{<:ADJMAT_T}) = :dense Graphs.nv(g::GNNGraph) = g.num_nodes Graphs.ne(g::GNNGraph) = g.num_edges diff --git a/GNNGraphs/src/transform.jl b/GNNGraphs/src/transform.jl index 325a20f5c..6e0b564cd 100644 --- a/GNNGraphs/src/transform.jl +++ b/GNNGraphs/src/transform.jl @@ -731,6 +731,7 @@ end Set `w` as edge weights in the returned graph. """ function set_edge_weight(g::GNNGraph, w::AbstractVector) + # TODO preserve the representation instead of converting to COO s, t = edge_index(g) @assert length(w) == length(s) diff --git a/GNNGraphs/test/gnngraph.jl b/GNNGraphs/test/gnngraph.jl index d65f7adda..0c15e6cff 100644 --- a/GNNGraphs/test/gnngraph.jl +++ b/GNNGraphs/test/gnngraph.jl @@ -1,3 +1,6 @@ +# TODO test that the graph type is preserved +# when constructing a GNNGraph from another + @testset "Constructor: adjacency matrix" begin A = sprand(10, 10, 0.5) sA, tA, vA = findnz(A) diff --git a/GNNGraphs/test/query.jl b/GNNGraphs/test/query.jl index e7f55e76a..a345ae779 100644 --- a/GNNGraphs/test/query.jl +++ b/GNNGraphs/test/query.jl @@ -257,3 +257,20 @@ if GRAPH_T == :coo end end +@testset "get_graph_type" begin + g = rand_graph(10, 20, graph_type = GRAPH_T) + @test get_graph_type(g) == GRAPH_T + + gsparse = GNNGraph(g, graph_type=:sparse) + @test get_graph_type(gsparse) == :sparse + @test gsparse.graph isa SparseMatrixCSC + + gcoo = GNNGraph(g, graph_type=:coo) + @test get_graph_type(gcoo) == :coo + @test gcoo.graph[1:2] isa Tuple{Vector{Int}, Vector{Int}} + + + gdense = GNNGraph(g, graph_type=:dense) + @test get_graph_type(gdense) == :dense + @test gdense.graph isa Matrix{Int} +end diff --git a/GNNlib/Project.toml b/GNNlib/Project.toml index 20cf840c3..4acfceaaa 100644 --- a/GNNlib/Project.toml +++ b/GNNlib/Project.toml @@ -7,6 +7,7 @@ version = "0.2.2" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c" +GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" @@ -14,28 +15,27 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" [weakdeps] +AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" [extensions] +GNNlibAMDGPUExt = "AMDGPU" GNNlibCUDAExt = "CUDA" +# GPUArraysCore is not needed as a direct dependency +# but pinning it to 0.1 avoids problems when we do Pkg.add("CUDA") in testing +# See https://github.com/JuliaGPU/CUDA.jl/issues/2564 + [compat] +AMDGPU = "1" CUDA = "4, 5" ChainRulesCore = "1.24" DataStructures = "0.18" GNNGraphs = "1.0" +GPUArraysCore = "0.1" LinearAlgebra = "1" MLUtils = "0.4" NNlib = 
"0.9" Random = "1" Statistics = "1" julia = "1.10" - -[extras] -ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" -Reexport = "189a3867-3050-52da-a836-e630ba90ab69" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[targets] -test = ["Test", "ReTestItems", "Reexport", "SparseArrays"] diff --git a/GNNlib/ext/GNNlibAMDGPUExt.jl b/GNNlib/ext/GNNlibAMDGPUExt.jl new file mode 100644 index 000000000..83a4591c4 --- /dev/null +++ b/GNNlib/ext/GNNlibAMDGPUExt.jl @@ -0,0 +1,34 @@ +module GNNlibAMDGPUExt + +using AMDGPU: AnyROCMatrix +using Random, Statistics, LinearAlgebra +using GNNlib: GNNlib, propagate, copy_xj, e_mul_xj, w_mul_xj +using GNNGraphs: GNNGraph, COO_T, SPARSE_T + +###### PROPAGATE SPECIALIZATIONS #################### + +## COPY_XJ + +## avoid the fast path on gpu until we have better cuda support +function GNNlib.propagate(::typeof(copy_xj), g::GNNGraph{<:Union{COO_T, SPARSE_T}}, ::typeof(+), + xi, xj::AnyROCMatrix, e) + propagate((xi, xj, e) -> copy_xj(xi, xj, e), g, +, xi, xj, e) +end + +## E_MUL_XJ + +## avoid the fast path on gpu until we have better cuda support +function GNNlib.propagate(::typeof(e_mul_xj), g::GNNGraph{<:Union{COO_T, SPARSE_T}}, ::typeof(+), + xi, xj::AnyROCMatrix, e::AbstractVector) + propagate((xi, xj, e) -> e_mul_xj(xi, xj, e), g, +, xi, xj, e) +end + +## W_MUL_XJ + +## avoid the fast path on gpu until we have better support +function GNNlib.propagate(::typeof(w_mul_xj), g::GNNGraph{<:Union{COO_T, SPARSE_T}}, ::typeof(+), + xi, xj::AnyROCMatrix, e::Nothing) + propagate((xi, xj, e) -> w_mul_xj(xi, xj, e), g, +, xi, xj, e) +end + +end #module diff --git a/GNNlib/src/msgpass.jl b/GNNlib/src/msgpass.jl index 1aa17437a..f6a1cf659 100644 --- a/GNNlib/src/msgpass.jl +++ b/GNNlib/src/msgpass.jl @@ -184,7 +184,7 @@ xj_sub_xi(xi, xj, e) = xj .- xi """ e_mul_xj(xi, xj, e) = reshape(e, (...)) .* xj -Reshape `e` into broadcast compatible shape with `xj` +Reshape `e` into a broadcast compatible shape with `xj` (by prepending singleton dimensions) then perform broadcasted multiplication. 
""" diff --git a/GNNlib/test/Project.toml b/GNNlib/test/Project.toml new file mode 100644 index 000000000..36fcae23b --- /dev/null +++ b/GNNlib/test/Project.toml @@ -0,0 +1,22 @@ +[deps] +FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" +Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" +Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196" +GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c" +GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48" +GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" +MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40" +MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" +NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" +Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + +[compat] +GPUArraysCore = "0.1" diff --git a/GNNlib/test/msgpass.jl b/GNNlib/test/msgpass.jl new file mode 100644 index 000000000..5741cdb5a --- /dev/null +++ b/GNNlib/test/msgpass.jl @@ -0,0 +1,219 @@ +@testitem "msgpass" setup=[TestModuleGNNlib] begin + using .TestModuleGNNlib + #TODO test all graph types + g = TEST_GRAPHS[1] + out_channel = size(g.x, 1) + num_V = g.num_nodes + num_E = g.num_edges + g = GNNGraph(g, edata = rand(Float32, size(g.x, 1), g.num_edges)) + + @testset "propagate" begin + function message(xi, xj, e) + @test xi === nothing + @test e === nothing + ones(Float32, out_channel, size(xj, 2)) + end + + m = propagate(message, g, +, xj = g.x) + + @test size(m) == (out_channel, num_V) + + @testset "isolated nodes" begin + x1 = rand(1, 6) + g1 = GNNGraph(collect(1:5), collect(1:5), num_nodes = 6) + y1 = propagate((xi, xj, e) -> xj, g1, +, xj = x1) + @test size(y1) == (1, 6) + end + end + + @testset "apply_edges" begin + m = apply_edges(g, e = g.e) do xi, xj, e + @test xi === nothing + @test xj === nothing + ones(out_channel, size(e, 2)) + end + + @test m == ones(out_channel, num_E) + + # With NamedTuple input + m = apply_edges(g, xj = (; a = g.x, b = 2g.x), e = g.e) do xi, xj, e + @test xi === nothing + @test xj.b == 2 * xj.a + @test size(xj.a, 2) == size(xj.b, 2) == size(e, 2) + ones(out_channel, size(e, 2)) + end + + # NamedTuple output + m = apply_edges(g, e = g.e) do xi, xj, e + @test xi === nothing + @test xj === nothing + (; a = ones(out_channel, size(e, 2))) + end + + @test m.a == ones(out_channel, num_E) + + @testset "sizecheck" begin + x = rand(3, g.num_nodes - 1) + @test_throws AssertionError apply_edges(copy_xj, g, xj = x) + @test_throws AssertionError apply_edges(copy_xj, g, xi = x) + + x = (a = rand(3, g.num_nodes), b = rand(3, g.num_nodes + 1)) + @test_throws AssertionError apply_edges(copy_xj, g, xj = x) + @test_throws AssertionError apply_edges(copy_xj, g, xi = x) + + e = rand(3, g.num_edges - 1) + @test_throws AssertionError apply_edges(copy_xj, g, e = e) + end + end + + @testset "copy_xj" begin + n = 128 + A = sprand(n, n, 0.1) + Adj = map(x -> x > 0 ? 
1 : 0, A) + X = rand(10, n) + + g = GNNGraph(A, ndata = X, graph_type = :coo) + + function spmm_copyxj_fused(g) + propagate(copy_xj, + g, +; xj = g.ndata.x) + end + + function spmm_copyxj_unfused(g) + propagate((xi, xj, e) -> xj, + g, +; xj = g.ndata.x) + end + + @test spmm_copyxj_unfused(g) ≈ X * Adj + @test spmm_copyxj_fused(g) ≈ X * Adj + end + + @testset "e_mul_xj and w_mul_xj for weighted conv" begin + n = 128 + A = sprand(n, n, 0.1) + Adj = map(x -> x > 0 ? 1 : 0, A) + X = rand(10, n) + + g = GNNGraph(A, ndata = X, edata = A.nzval, graph_type = :coo) + + function spmm_unfused(g) + propagate((xi, xj, e) -> reshape(e, 1, :) .* xj, + g, +; xj = g.ndata.x, e = g.edata.e) + end + function spmm_fused(g) + propagate(e_mul_xj, + g, +; xj = g.ndata.x, e = g.edata.e) + end + + function spmm_fused2(g) + propagate(w_mul_xj, + g, +; xj = g.ndata.x) + end + + @test spmm_unfused(g) ≈ X * A + @test spmm_fused(g) ≈ X * A + @test spmm_fused2(g) ≈ X * A + end + + @testset "aggregate_neighbors" begin + @testset "sizecheck" begin + m = rand(2, g.num_edges - 1) + @test_throws AssertionError aggregate_neighbors(g, +, m) + + m = (a = rand(2, g.num_edges + 1), b = nothing) + @test_throws AssertionError aggregate_neighbors(g, +, m) + end + end +end + +@testitem "propagate" setup=[TestModuleGNNlib] begin + using .TestModuleGNNlib + + @testset "copy_xj +" begin + for g in TEST_GRAPHS + f(g, x) = propagate(copy_xj, g, +, xj = x) + test_gradients(f, g, g.x; test_grad_f=false) + end + end + + @testset "copy_xj mean" begin + for g in TEST_GRAPHS + f(g, x) = propagate(copy_xj, g, mean, xj = x) + test_gradients(f, g, g.x; test_grad_f=false) + end + end + + @testset "e_mul_xj +" begin + for g in TEST_GRAPHS + e = rand(Float32, size(g.x, 1), g.num_edges) + f(g, x, e) = propagate(e_mul_xj, g, +; xj = x, e) + test_gradients(f, g, g.x, e; test_grad_f=false) + end + end + + @testset "w_mul_xj +" begin + for g in TEST_GRAPHS + w = rand(Float32, g.num_edges) + function f(g, x, w) + g = set_edge_weight(g, w) + return propagate(w_mul_xj, g, +, xj = x) + end + test_gradients(f, g, g.x, w; test_grad_f=false) + end + end +end + +@testitem "propagate GPU" setup=[TestModuleGNNlib] tags=[:gpu] begin + using .TestModuleGNNlib + + @testset "copy_xj +" begin + for g in TEST_GRAPHS + dev = gpu_device(force=true) + broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice + f(g, x) = propagate(copy_xj, g, +, xj = x) + @test test_gradients( + f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false + ) broken=broken + end + end + + @testset "copy_xj mean" begin + for g in TEST_GRAPHS + dev = gpu_device(force=true) + broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice + f(g, x) = propagate(copy_xj, g, mean, xj = x) + @test test_gradients( + f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false + ) broken=broken + end + end + + @testset "e_mul_xj +" begin + for g in TEST_GRAPHS + dev = gpu_device(force=true) + broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice + e = rand(Float32, size(g.x, 1), g.num_edges) + f(g, x, e) = propagate(e_mul_xj, g, +; xj = x, e) + @test test_gradients( + f, g, g.x, e; test_gpu=true, test_grad_f=false, compare_finite_diff=false + ) broken=broken + end + end + + @testset "w_mul_xj +" begin + for g in TEST_GRAPHS + w = rand(Float32, g.num_edges) + function f(g, x, w) + g = set_edge_weight(g, w) + return propagate(w_mul_xj, g, +, xj = x) + end + dev = gpu_device(force=true) + # @show get_graph_type(g) has_isolated_nodes(g) + # broken = get_graph_type(g) == 
:sparse + broken = true + @test test_gradients( + f, g, g.x, w; test_gpu=true, test_grad_f=false, compare_finite_diff=false + ) broken=broken + end + end +end diff --git a/GNNlib/test/msgpass_tests.jl b/GNNlib/test/msgpass_tests.jl deleted file mode 100644 index 60d13fcb0..000000000 --- a/GNNlib/test/msgpass_tests.jl +++ /dev/null @@ -1,140 +0,0 @@ -@testitem "msgpass" setup=[SharedTestSetup] begin - #TODO test all graph types - GRAPH_T = :coo - in_channel = 10 - out_channel = 5 - num_V = 6 - num_E = 14 - T = Float32 - - adj = [0 1 0 0 0 0 - 1 0 0 1 1 1 - 0 0 0 0 0 1 - 0 1 0 0 1 0 - 0 1 0 1 0 1 - 0 1 1 0 1 0] - - X = rand(T, in_channel, num_V) - E = rand(T, in_channel, num_E) - - g = GNNGraph(adj, graph_type = GRAPH_T) - - @testset "propagate" begin - function message(xi, xj, e) - @test xi === nothing - @test e === nothing - ones(T, out_channel, size(xj, 2)) - end - - m = propagate(message, g, +, xj = X) - - @test size(m) == (out_channel, num_V) - - @testset "isolated nodes" begin - x1 = rand(1, 6) - g1 = GNNGraph(collect(1:5), collect(1:5), num_nodes = 6) - y1 = propagate((xi, xj, e) -> xj, g, +, xj = x1) - @test size(y1) == (1, 6) - end - end - - @testset "apply_edges" begin - m = apply_edges(g, e = E) do xi, xj, e - @test xi === nothing - @test xj === nothing - ones(out_channel, size(e, 2)) - end - - @test m == ones(out_channel, num_E) - - # With NamedTuple input - m = apply_edges(g, xj = (; a = X, b = 2X), e = E) do xi, xj, e - @test xi === nothing - @test xj.b == 2 * xj.a - @test size(xj.a, 2) == size(xj.b, 2) == size(e, 2) - ones(out_channel, size(e, 2)) - end - - # NamedTuple output - m = apply_edges(g, e = E) do xi, xj, e - @test xi === nothing - @test xj === nothing - (; a = ones(out_channel, size(e, 2))) - end - - @test m.a == ones(out_channel, num_E) - - @testset "sizecheck" begin - x = rand(3, g.num_nodes - 1) - @test_throws AssertionError apply_edges(copy_xj, g, xj = x) - @test_throws AssertionError apply_edges(copy_xj, g, xi = x) - - x = (a = rand(3, g.num_nodes), b = rand(3, g.num_nodes + 1)) - @test_throws AssertionError apply_edges(copy_xj, g, xj = x) - @test_throws AssertionError apply_edges(copy_xj, g, xi = x) - - e = rand(3, g.num_edges - 1) - @test_throws AssertionError apply_edges(copy_xj, g, e = e) - end - end - - @testset "copy_xj" begin - n = 128 - A = sprand(n, n, 0.1) - Adj = map(x -> x > 0 ? 1 : 0, A) - X = rand(10, n) - - g = GNNGraph(A, ndata = X, graph_type = GRAPH_T) - - function spmm_copyxj_fused(g) - propagate(copy_xj, - g, +; xj = g.ndata.x) - end - - function spmm_copyxj_unfused(g) - propagate((xi, xj, e) -> xj, - g, +; xj = g.ndata.x) - end - - @test spmm_copyxj_unfused(g) ≈ X * Adj - @test spmm_copyxj_fused(g) ≈ X * Adj - end - - @testset "e_mul_xj and w_mul_xj for weighted conv" begin - n = 128 - A = sprand(n, n, 0.1) - Adj = map(x -> x > 0 ? 
1 : 0, A) - X = rand(10, n) - - g = GNNGraph(A, ndata = X, edata = A.nzval, graph_type = GRAPH_T) - - function spmm_unfused(g) - propagate((xi, xj, e) -> reshape(e, 1, :) .* xj, - g, +; xj = g.ndata.x, e = g.edata.e) - end - function spmm_fused(g) - propagate(e_mul_xj, - g, +; xj = g.ndata.x, e = g.edata.e) - end - - function spmm_fused2(g) - propagate(w_mul_xj, - g, +; xj = g.ndata.x) - end - - @test spmm_unfused(g) ≈ X * A - @test spmm_fused(g) ≈ X * A - @test spmm_fused2(g) ≈ X * A - end - - @testset "aggregate_neighbors" begin - @testset "sizecheck" begin - m = rand(2, g.num_edges - 1) - @test_throws AssertionError aggregate_neighbors(g, +, m) - - m = (a = rand(2, g.num_edges + 1), b = nothing) - @test_throws AssertionError aggregate_neighbors(g, +, m) - end - end - -end \ No newline at end of file diff --git a/GNNlib/test/runtests.jl b/GNNlib/test/runtests.jl index e4c4512b4..d420f5a6c 100644 --- a/GNNlib/test/runtests.jl +++ b/GNNlib/test/runtests.jl @@ -1,6 +1,28 @@ -using GNNlib -using Test -using ReTestItems -using Random, Statistics +using TestItemRunner -runtests(GNNlib) +## See https://www.julia-vscode.org/docs/stable/userguide/testitems/ +## for how to run the tests within VS Code. +## See test_module.jl for the test infrastructure. + +## Uncomment below and in test_module.jl to change the default test settings +# ENV["GNN_TEST_CPU"] = "false" +# ENV["GNN_TEST_CUDA"] = "true" +# ENV["GNN_TEST_AMDGPU"] = "true" +# ENV["GNN_TEST_Metal"] = "true" + +# The only available tag at the moment is :gpu +# Tests not tagged with :gpu are considered to be CPU tests +# Tests tagged with :gpu should run on all GPU backends + +if get(ENV, "GNN_TEST_CPU", "true") == "true" + @run_package_tests filter = ti -> :gpu ∉ ti.tags +end +if get(ENV, "GNN_TEST_CUDA", "false") == "true" + @run_package_tests filter = ti -> :gpu ∈ ti.tags +end +if get(ENV, "GNN_TEST_AMDGPU", "false") == "true" + @run_package_tests filter = ti -> :gpu ∈ ti.tags +end +if get(ENV, "GNN_TEST_Metal", "false") == "true" + @run_package_tests filter = ti -> :gpu ∈ ti.tags +end diff --git a/GNNlib/test/shared_testsetup.jl b/GNNlib/test/shared_testsetup.jl deleted file mode 100644 index 106db5159..000000000 --- a/GNNlib/test/shared_testsetup.jl +++ /dev/null @@ -1,12 +0,0 @@ -@testsetup module SharedTestSetup - -import Reexport: @reexport - -@reexport using GNNlib -@reexport using GNNGraphs -@reexport using NNlib -@reexport using MLUtils -@reexport using SparseArrays -@reexport using Test, Random, Statistics - -end \ No newline at end of file diff --git a/GNNlib/test/test_module.jl b/GNNlib/test/test_module.jl new file mode 100644 index 000000000..27a83154c --- /dev/null +++ b/GNNlib/test/test_module.jl @@ -0,0 +1,180 @@ +@testmodule TestModuleGNNlib begin + +using Pkg + +### GPU backends settings ############ +# tried to put this in __init__ but is not executed for some reason + +## Uncomment below to change the default test settings +# ENV["GNN_TEST_CUDA"] = "true" +# ENV["GNN_TEST_AMDGPU"] = "true" +# ENV["GNN_TEST_Metal"] = "true" + +to_test(backend) = get(ENV, "GNN_TEST_$(backend)", "false") == "true" +has_dependecies(pkgs) = all(pkg -> haskey(Pkg.project().dependencies, pkg), pkgs) +deps_dict = Dict(:CUDA => ["CUDA", "cuDNN"], :AMDGPU => ["AMDGPU"], :Metal => ["Metal"]) + +for (backend, deps) in deps_dict + if to_test(backend) + if !has_dependecies(deps) + Pkg.add(deps) + end + @eval using $backend + if backend == :CUDA + @eval using cuDNN + end + @eval $backend.allowscalar(false) + end +end 
+###################################### + +import Reexport: @reexport + +@reexport using GNNlib +@reexport using GNNGraphs +@reexport using NNlib +@reexport using MLUtils +@reexport using SparseArrays +@reexport using Test, Random, Statistics +@reexport using MLDataDevices +using Functors: fmapstructure_with_path +using FiniteDifferences: FiniteDifferences +using Zygote: Zygote +using Flux: Flux + +# from this module +export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS, + test_gradients, finitediff_withgradient, + check_equal_leaves + + +const D_IN = 3 +const D_OUT = 5 + +function finitediff_withgradient(f, x...) + y = f(x...) + # We set a range to avoid domain errors + fdm = FiniteDifferences.central_fdm(5, 1, max_range=1e-2) + return y, FiniteDifferences.grad(fdm, f, x...) +end + +function check_equal_leaves(a, b; rtol=1e-4, atol=1e-4) + equal = true + fmapstructure_with_path(a, b) do kp, x, y + if x isa AbstractArray + # @show kp + # @assert isapprox(x, y; rtol, atol) + if !isapprox(x, y; rtol, atol) + equal = false + end + end + end + @assert equal +end + +function test_gradients( + f, + graph::GNNGraph, + xs...; + rtol=1e-5, atol=1e-5, + test_gpu = false, + test_grad_f = true, + test_grad_x = true, + compare_finite_diff = true, + loss = (f, g, xs...) -> mean(f(g, xs...)), + ) + + if !test_gpu && !compare_finite_diff + error("You should either compare finite diff vs CPU AD \ + or CPU AD vs GPU AD.") + end + + ## Let's make sure first that the forward pass works. + l = loss(f, graph, xs...) + @assert l isa Number + if test_gpu + gpu_dev = gpu_device(force=true) + cpu_dev = cpu_device() + graph_gpu = graph |> gpu_dev + xs_gpu = xs |> gpu_dev + f_gpu = f |> gpu_dev + l_gpu = loss(f_gpu, graph_gpu, xs_gpu...) + @assert l_gpu isa Number + end + + if test_grad_x + # Zygote gradient with respect to input. + y, g = Zygote.withgradient((xs...) -> loss(f, graph, xs...), xs...) + + if compare_finite_diff + # Cast to Float64 to avoid precision issues. + f64 = f |> Flux.f64 + xs64 = xs .|> Flux.f64 + y_fd, g_fd = finitediff_withgradient((xs...) -> loss(f64, graph, xs...), xs64...) + @assert isapprox(y, y_fd; rtol, atol) + check_equal_leaves(g, g_fd; rtol, atol) + end + + if test_gpu + # Zygote gradient with respect to input on GPU. + y_gpu, g_gpu = Zygote.withgradient((xs...) -> loss(f_gpu, graph_gpu, xs...), xs_gpu...) + @assert get_device(g_gpu) == get_device(xs_gpu) + @assert isapprox(y_gpu, y; rtol, atol) + check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol) + end + end + + if test_grad_f + # Zygote gradient with respect to f. + y, g = Zygote.withgradient(f -> loss(f, graph, xs...), f) + + if compare_finite_diff + # Cast to Float64 to avoid precision issues. + f64 = f |> Flux.f64 + ps, re = Flux.destructure(f64) + y_fd, g_fd = finitediff_withgradient(ps -> loss(re(ps),graph, xs...), ps) + g_fd = (re(g_fd[1]),) + @assert isapprox(y, y_fd; rtol, atol) + check_equal_leaves(g, g_fd; rtol, atol) + end + + if test_gpu + # Zygote gradient with respect to f on GPU. 
+ y_gpu, g_gpu = Zygote.withgradient(f -> loss(f,graph_gpu, xs_gpu...), f_gpu) + # @assert get_device(g_gpu) == get_device(xs_gpu) + @assert isapprox(y_gpu, y; rtol, atol) + check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol) + end + end + @test true # if we reach here, the test passed + return true +end + +function generate_test_graphs(graph_type) + adj1 = [0 1 0 1 + 1 0 1 0 + 0 1 0 1 + 1 0 1 0] + + g1 = GNNGraph(adj1, + ndata = rand(Float32, D_IN, 4); + graph_type) + + adj_single_vertex = [0 0 0 1 + 0 0 0 0 + 0 0 0 1 + 1 0 1 0] + + g_single_vertex = GNNGraph(adj_single_vertex, + ndata = rand(Float32, D_IN, 4); + graph_type) + + return (g1, g_single_vertex) +end + +GRAPH_TYPES = [:coo, :dense, :sparse] +TEST_GRAPHS = [generate_test_graphs(:coo)..., + generate_test_graphs(:dense)..., + generate_test_graphs(:sparse)...] + +end # module \ No newline at end of file diff --git a/GNNlib/test/utils_tests.jl b/GNNlib/test/utils.jl similarity index 96% rename from GNNlib/test/utils_tests.jl rename to GNNlib/test/utils.jl index 762ba58b9..bf06f86fd 100644 --- a/GNNlib/test/utils_tests.jl +++ b/GNNlib/test/utils.jl @@ -1,4 +1,5 @@ -@testitem "utils" setup=[SharedTestSetup] begin +@testitem "utils" setup=[TestModuleGNNlib] begin + using .TestModuleGNNlib # TODO test all graph types GRAPH_T = :coo De, Dx = 3, 2 diff --git a/GraphNeuralNetworks/test/Project.toml b/GraphNeuralNetworks/test/Project.toml index 5e554715c..d6f77b391 100644 --- a/GraphNeuralNetworks/test/Project.toml +++ b/GraphNeuralNetworks/test/Project.toml @@ -3,6 +3,7 @@ ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a" FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196" +GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" GraphNeuralNetworks = "cffab07f-9bc2-4db1-8861-388f63bf7694" Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" @@ -13,3 +14,6 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + +[compat] +GPUArraysCore = "0.1" diff --git a/GraphNeuralNetworks/test/test_module.jl b/GraphNeuralNetworks/test/test_module.jl index e7f9a0031..fa50e5821 100644 --- a/GraphNeuralNetworks/test/test_module.jl +++ b/GraphNeuralNetworks/test/test_module.jl @@ -3,25 +3,25 @@ using Pkg ## Uncomment below to change the default test settings -# ENV["GNN_TEST_CPU"] = "false" # ENV["GNN_TEST_CUDA"] = "true" # ENV["GNN_TEST_AMDGPU"] = "true" # ENV["GNN_TEST_Metal"] = "true" -if get(ENV, "GNN_TEST_CUDA", "false") == "true" - Pkg.add(["CUDA", "cuDNN"]) - using CUDA - CUDA.allowscalar(false) -end -if get(ENV, "GNN_TEST_AMDGPU", "false") == "true" - Pkg.add("AMDGPU") - using AMDGPU - AMDGPU.allowscalar(false) -end -if get(ENV, "GNN_TEST_Metal", "false") == "true" - Pkg.add("Metal") - using Metal - Metal.allowscalar(false) +to_test(backend) = get(ENV, "GNN_TEST_$(backend)", "false") == "true" +has_dependecies(pkgs) = all(pkg -> haskey(Pkg.project().dependencies, pkg), pkgs) +deps_dict = Dict(:CUDA => ["CUDA", "cuDNN"], :AMDGPU => ["AMDGPU"], :Metal => ["Metal"]) + +for (backend, deps) in deps_dict + if to_test(backend) + if !has_dependecies(deps) + Pkg.add(deps) + end + @eval using $backend + if backend == :CUDA + @eval using cuDNN + end + @eval $backend.allowscalar(false) + end end using GraphNeuralNetworks @@ -59,15 +59,17 @@ function 
finitediff_withgradient(f, x...) end function check_equal_leaves(a, b; rtol=1e-4, atol=1e-4) + equal = true fmapstructure_with_path(a, b) do kp, x, y if x isa AbstractArray # @show kp - @test x ≈ y rtol=rtol atol=atol - # elseif x isa Number - # @show kp - # @test x ≈ y rtol=rtol atol=atol + # @assert isapprox(x, y; rtol, atol) + if !isapprox(x, y; rtol, atol) + equal = false + end end end + @assert equal end function test_gradients( @@ -89,7 +91,7 @@ function test_gradients( ## Let's make sure first that the forward pass works. l = loss(f, graph, xs...) - @test l isa Number + @assert l isa Number if test_gpu gpu_dev = gpu_device(force=true) cpu_dev = cpu_device() @@ -97,7 +99,7 @@ function test_gradients( xs_gpu = xs |> gpu_dev f_gpu = f |> gpu_dev l_gpu = loss(f_gpu, graph_gpu, xs_gpu...) - @test l_gpu isa Number + @assert l_gpu isa Number end if test_grad_x @@ -109,15 +111,15 @@ function test_gradients( f64 = f |> Flux.f64 xs64 = xs .|> Flux.f64 y_fd, g_fd = finitediff_withgradient((xs...) -> loss(f64, graph, xs...), xs64...) - @test y ≈ y_fd rtol=rtol atol=atol + @assert isapprox(y, y_fd; rtol, atol) check_equal_leaves(g, g_fd; rtol, atol) end if test_gpu # Zygote gradient with respect to input on GPU. y_gpu, g_gpu = Zygote.withgradient((xs...) -> loss(f_gpu, graph_gpu, xs...), xs_gpu...) - @test get_device(g_gpu) == get_device(xs_gpu) - @test y_gpu ≈ y rtol=rtol atol=atol + @assert get_device(g_gpu) == get_device(xs_gpu) + @assert isapprox(y_gpu, y; rtol, atol) check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol) end end @@ -132,18 +134,19 @@ function test_gradients( ps, re = Flux.destructure(f64) y_fd, g_fd = finitediff_withgradient(ps -> loss(re(ps),graph, xs...), ps) g_fd = (re(g_fd[1]),) - @test y ≈ y_fd rtol=rtol atol=atol + @assert isapprox(y, y_fd; rtol, atol) check_equal_leaves(g, g_fd; rtol, atol) end if test_gpu # Zygote gradient with respect to f on GPU. y_gpu, g_gpu = Zygote.withgradient(f -> loss(f,graph_gpu, xs_gpu...), f_gpu) - # @test get_device(g_gpu) == get_device(xs_gpu) - @test y_gpu ≈ y rtol=rtol atol=atol + # @assert get_device(g_gpu) == get_device(xs_gpu) + @assert isapprox(y_gpu, y; rtol, atol) check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol) end end + @test true # if we reach here, the test passed return true end
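For reference, the "GNNlib CUDA" pipeline step added above can be reproduced locally with a short script. This is a minimal sketch, assuming a CUDA-capable machine and the repository root as the working directory; the ENV flags mirror the values set in the Buildkite step and read by the new GNNlib/test/runtests.jl.

```julia
# Minimal local equivalent of the "GNNlib CUDA" CI step (sketch, assumptions noted above).
# Assumes a CUDA GPU is available and that this runs from the repository root.
import Pkg

ENV["GNN_TEST_CUDA"] = "true"   # run the test items tagged :gpu
ENV["GNN_TEST_CPU"] = "false"   # skip the CPU-only test items

Pkg.activate("GNNlib/test")
Pkg.develop([Pkg.PackageSpec(path = "GNNGraphs"), Pkg.PackageSpec(path = "GNNlib")])
Pkg.add(["CUDA", "cuDNN"])      # GPU backends are not direct dependencies of the test project
Pkg.test("GNNlib")
```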