diff --git a/src/layers/conv.jl b/src/layers/conv.jl
index d6c97a0b8..8c3565dce 100644
--- a/src/layers/conv.jl
+++ b/src/layers/conv.jl
@@ -300,8 +300,7 @@ l = GATConv(in_channel => out_channel, add_self_loops = false, bias = false; hea
 y = l(g, x)
 ```
 """
-struct GATConv{DX <: Dense, DE <: Union{Dense, Nothing}, DV, T, A <: AbstractMatrix, F, B} <:
-       GNNLayer
+struct GATConv{DX<:Dense,DE<:Union{Dense, Nothing},DV,T,A<:AbstractMatrix,F,B} <: GNNLayer
     dense_x::DX
     dense_e::DE
     bias::B
@@ -316,7 +315,7 @@ struct GATConv{DX <: Dense, DE <: Union{Dense, Nothing}, DV, T, A <: AbstractMat
 end
 
 Flux.@layer GATConv
-Flux.trainable(l::GATConv) = (dense_x = l.dense_x, dense_e = l.dense_e, bias = l.bias, a = l.a)
+Flux.trainable(l::GATConv) = (; l.dense_x, l.dense_e, l.bias, l.a)
 
 GATConv(ch::Pair{Int, Int}, args...; kws...) = GATConv((ch[1], 0) => ch[2], args...; kws...)
 
diff --git a/test/runtests.jl b/test/runtests.jl
index 8e518ec3d..05cb6fd5f 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -35,14 +35,11 @@ tests = [
 !CUDA.functional() && @warn("CUDA unavailable, not testing GPU support")
 
 # @testset "GraphNeuralNetworks: graph format $graph_type" for graph_type in (:coo, :dense, :sparse)
-# for graph_type in (:coo, :dense, :sparse)
-for graph_type in (:dense,)
+for graph_type in (:coo, :dense, :sparse)
     @info "Testing graph format :$graph_type"
     global GRAPH_T = graph_type
     global TEST_GPU = CUDA.functional() && (GRAPH_T != :sparse)
-    # global GRAPH_T = :sparse
-    # global TEST_GPU = false
 
     @testset "$t" for t in tests
         startswith(t, "examples") && GRAPH_T == :dense && continue # not testing :dense since causes OutOfMemory on GitHub's CI
 
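
A note on the `(; ...)` syntax in the new `Flux.trainable` definition: this is Julia's named-tuple field shorthand, where `(; l.dense_x, l.bias)` expands to `(dense_x = l.dense_x, bias = l.bias)`, so the trainable parameters keep the same names with less repetition. A minimal standalone sketch of the equivalence (the `Toy` struct here is hypothetical, for illustration only, not part of this PR):

```julia
# Hypothetical struct, standing in for a layer like GATConv.
struct Toy
    dense_x::Vector{Float64}
    bias::Float64
end

t = Toy([1.0, 2.0], 0.5)

# Shorthand form, as used in the new Flux.trainable definition:
nt_short = (; t.dense_x, t.bias)

# Equivalent explicit form, as in the old definition:
nt_long = (dense_x = t.dense_x, bias = t.bias)

@assert nt_short == nt_long  # both are (dense_x = [1.0, 2.0], bias = 0.5)
```

Both forms build the same named tuple, so the change to `Flux.trainable` is purely cosmetic.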