From 065ec3f763b77e7fca86281433465164b9862f0b Mon Sep 17 00:00:00 2001
From: Carlo Lucibello
Date: Sun, 1 Dec 2024 15:07:56 +0100
Subject: [PATCH] simplify documentation (#531)

---
 .github/workflows/test_GNNGraphs.yml          |   2 +-
 .github/workflows/test_GNNLux.yml             |   3 +-
 .github/workflows/test_GNNlib.yml             |   2 +-
 .../workflows/test_GraphNeuralNetworks.yml    |   2 +-
 .gitignore                                    |   3 +-
 GNNGraphs/docs/make.jl                        |   6 +-
 GNNGraphs/docs/src/{ => guides}/datasets.md   |   0
 GNNGraphs/docs/src/{ => guides}/gnngraph.md   |   0
 .../docs/src/{ => guides}/heterograph.md      |   0
 .../docs/src/{ => guides}/temporalgraph.md    |   0
 GNNlib/docs/make.jl                           |   2 +-
 .../docs/src/{ => guides}/messagepassing.md   |   0
 GNNlib/test/msgpass.jl                        |  10 +-
 GNNlib/test/test_module.jl                    |  16 ++-
 GraphNeuralNetworks/docs/make.jl              |  64 ++++++-----
 GraphNeuralNetworks/docs/src/datasets.md      |   5 -
 GraphNeuralNetworks/docs/src/gsoc.md          |   3 -
 .../docs/src/{ => guides}/models.md           |   0
 GraphNeuralNetworks/docs/src/home.md          |  87 --------------
 GraphNeuralNetworks/docs/src/index.md         | 108 ++++++++++++++----
 GraphNeuralNetworks/docs/src/other/dummy.md   |   0
 GraphNeuralNetworks/test/layers/conv.jl       |  17 ++-
 GraphNeuralNetworks/test/test_module.jl       |  27 ++++-
 docs/make-multi.jl                            |   6 +-
 24 files changed, 190 insertions(+), 173 deletions(-)
 rename GNNGraphs/docs/src/{ => guides}/datasets.md (100%)
 rename GNNGraphs/docs/src/{ => guides}/gnngraph.md (100%)
 rename GNNGraphs/docs/src/{ => guides}/heterograph.md (100%)
 rename GNNGraphs/docs/src/{ => guides}/temporalgraph.md (100%)
 rename GNNlib/docs/src/{ => guides}/messagepassing.md (100%)
 delete mode 100644 GraphNeuralNetworks/docs/src/datasets.md
 delete mode 100644 GraphNeuralNetworks/docs/src/gsoc.md
 rename GraphNeuralNetworks/docs/src/{ => guides}/models.md (100%)
 delete mode 100644 GraphNeuralNetworks/docs/src/home.md
 create mode 100644 GraphNeuralNetworks/docs/src/other/dummy.md

diff --git a/.github/workflows/test_GNNGraphs.yml b/.github/workflows/test_GNNGraphs.yml
index 3c631a371..6b9e52ddd 100644
--- a/.github/workflows/test_GNNGraphs.yml
+++ b/.github/workflows/test_GNNGraphs.yml
@@ -41,7 +41,7 @@ jobs:
           Pkg.test("GNNGraphs"; coverage=true)
       - uses: julia-actions/julia-processcoverage@v1
         with:
-          directories: ./GNNGraphs/src
+          directories: GNNGraphs/src,GNNGraphs/ext
       - uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/test_GNNLux.yml b/.github/workflows/test_GNNLux.yml
index 34f44e257..9b7dc9b6e 100644
--- a/.github/workflows/test_GNNLux.yml
+++ b/.github/workflows/test_GNNLux.yml
@@ -41,8 +41,7 @@ jobs:
           Pkg.test("GNNLux"; coverage=true)
       - uses: julia-actions/julia-processcoverage@v1
         with:
-          # directories: ./GNNLux/src, ./GNNLux/ext
-          directories: ./GNNLux/src
+          directories: GNNLux/src,GNNGraphs/src,GNNGraphs/ext,GNNlib/src,GNNlib/ext
      - uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/test_GNNlib.yml b/.github/workflows/test_GNNlib.yml
index 047940973..d858bb8d7 100644
--- a/.github/workflows/test_GNNlib.yml
+++ b/.github/workflows/test_GNNlib.yml
@@ -41,7 +41,7 @@ jobs:
           Pkg.test("GNNlib"; coverage=true)
       - uses: julia-actions/julia-processcoverage@v1
         with:
-          directories: ./GNNlib/src ./GNNGraphs/src
+          directories: GNNlib/src,GNNlib/ext,GNNGraphs/src,GNNGraphs/ext
       - uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/test_GraphNeuralNetworks.yml b/.github/workflows/test_GraphNeuralNetworks.yml
index c88a85fd5..faf26a43f 100644
--- a/.github/workflows/test_GraphNeuralNetworks.yml
+++ b/.github/workflows/test_GraphNeuralNetworks.yml
@@ -41,7 +41,7 @@ jobs:
           Pkg.test("GraphNeuralNetworks"; coverage=true)
       - uses: julia-actions/julia-processcoverage@v1
         with:
-          directories: ./GraphNeuralNetworks/src ./GNNGraphs/src ./GNNlib/src
+          directories: GraphNeuralNetworks/src,GNNGraphs/src,GNNGraphs/ext,GNNlib/src,GNNlib/ext
       - uses: codecov/codecov-action@v5
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.gitignore b/.gitignore
index b40b4d732..831ed33f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,4 +15,5 @@ GNNGraphs/docs/build
 GNNlib/docs/build
 GNNLux/docs/build
 GraphNeuralNetworks/docs/build
-tutorials/docs/build
\ No newline at end of file
+GraphNeuralNetworks/docs/src/other
+tutorials/docs/build
diff --git a/GNNGraphs/docs/make.jl b/GNNGraphs/docs/make.jl
index 2a923ac19..d94b291b1 100644
--- a/GNNGraphs/docs/make.jl
+++ b/GNNGraphs/docs/make.jl
@@ -16,8 +16,10 @@ makedocs(;
          format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing),
          sitename = "GNNGraphs.jl",
          pages = ["Home" => "index.md",
-                  "Graphs" => ["gnngraph.md", "heterograph.md", "temporalgraph.md"],
-                  "Datasets" => "datasets.md",
+                  "Guides" => [
+                      "Graphs" => ["guides/gnngraph.md", "guides/heterograph.md", "guides/temporalgraph.md"],
+                      "Datasets" => "guides/datasets.md",
+                  ],
                   "API Reference" => [
                       "GNNGraph" => "api/gnngraph.md",
                       "GNNHeteroGraph" => "api/heterograph.md",
diff --git a/GNNGraphs/docs/src/datasets.md b/GNNGraphs/docs/src/guides/datasets.md
similarity index 100%
rename from GNNGraphs/docs/src/datasets.md
rename to GNNGraphs/docs/src/guides/datasets.md
diff --git a/GNNGraphs/docs/src/gnngraph.md b/GNNGraphs/docs/src/guides/gnngraph.md
similarity index 100%
rename from GNNGraphs/docs/src/gnngraph.md
rename to GNNGraphs/docs/src/guides/gnngraph.md
diff --git a/GNNGraphs/docs/src/heterograph.md b/GNNGraphs/docs/src/guides/heterograph.md
similarity index 100%
rename from GNNGraphs/docs/src/heterograph.md
rename to GNNGraphs/docs/src/guides/heterograph.md
diff --git a/GNNGraphs/docs/src/temporalgraph.md b/GNNGraphs/docs/src/guides/temporalgraph.md
similarity index 100%
rename from GNNGraphs/docs/src/temporalgraph.md
rename to GNNGraphs/docs/src/guides/temporalgraph.md
diff --git a/GNNlib/docs/make.jl b/GNNlib/docs/make.jl
index 1e10396c0..dba621165 100644
--- a/GNNlib/docs/make.jl
+++ b/GNNlib/docs/make.jl
@@ -21,7 +21,7 @@ makedocs(;
          format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing),
          sitename = "GNNlib.jl",
          pages = ["Home" => "index.md",
-                  "Message Passing" => "messagepassing.md",
+                  "Message Passing" => "guides/messagepassing.md",
                   "API Reference" => [
 
 
diff --git a/GNNlib/docs/src/messagepassing.md b/GNNlib/docs/src/guides/messagepassing.md
similarity index 100%
rename from GNNlib/docs/src/messagepassing.md
rename to GNNlib/docs/src/guides/messagepassing.md
diff --git a/GNNlib/test/msgpass.jl b/GNNlib/test/msgpass.jl
index 5741cdb5a..a9dff3a56 100644
--- a/GNNlib/test/msgpass.jl
+++ b/GNNlib/test/msgpass.jl
@@ -168,8 +168,7 @@ end
 
 @testset "copy_xj +" begin
     for g in TEST_GRAPHS
-        dev = gpu_device(force=true)
-        broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice
+        broken = get_graph_type(g) == :sparse && gpu_backend() == "AMDGPU"
         f(g, x) = propagate(copy_xj, g, +, xj = x)
         @test test_gradients(
             f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false
@@ -179,8 +178,7 @@ end
 
 @testset "copy_xj mean" begin
     for g in TEST_GRAPHS
-        dev = gpu_device(force=true)
-        broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice
+        broken = get_graph_type(g) == :sparse && gpu_backend() == "AMDGPU"
         f(g, x) = propagate(copy_xj, g, mean, xj = x)
         @test test_gradients(
             f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false
@@ -190,8 +188,7 @@ end
 
 @testset "e_mul_xj +" begin
     for g in TEST_GRAPHS
-        dev = gpu_device(force=true)
-        broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice
+        broken = get_graph_type(g) == :sparse && gpu_backend() == "AMDGPU"
         e = rand(Float32, size(g.x, 1), g.num_edges)
         f(g, x, e) = propagate(e_mul_xj, g, +; xj = x, e)
         @test test_gradients(
@@ -207,7 +204,6 @@ end
             g = set_edge_weight(g, w)
             return propagate(w_mul_xj, g, +, xj = x)
         end
-        dev = gpu_device(force=true)
         # @show get_graph_type(g) has_isolated_nodes(g)
         # broken = get_graph_type(g) == :sparse
         broken = true
diff --git a/GNNlib/test/test_module.jl b/GNNlib/test/test_module.jl
index 27a83154c..b6894cdfa 100644
--- a/GNNlib/test/test_module.jl
+++ b/GNNlib/test/test_module.jl
@@ -45,7 +45,7 @@ using Flux: Flux
 
 # from this module
 export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS,
        test_gradients, finitediff_withgradient,
-       check_equal_leaves
+       check_equal_leaves, gpu_backend
 
 const D_IN = 3
@@ -177,4 +177,18 @@ TEST_GRAPHS = [generate_test_graphs(:coo)...,
                generate_test_graphs(:dense)...,
                generate_test_graphs(:sparse)...]
 
+
+function gpu_backend()
+    dev = gpu_device()
+    if dev isa CUDADevice
+        return "CUDA"
+    elseif dev isa AMDGPUDevice
+        return "AMDGPU"
+    elseif dev isa MetalDevice
+        return "Metal"
+    else
+        return "Unknown"
+    end
+end
+
 end # module
\ No newline at end of file
diff --git a/GraphNeuralNetworks/docs/make.jl b/GraphNeuralNetworks/docs/make.jl
index 7698a3abe..1248d4214 100644
--- a/GraphNeuralNetworks/docs/make.jl
+++ b/GraphNeuralNetworks/docs/make.jl
@@ -14,34 +14,44 @@ interlinks = InterLinks(
 )
 
+# Copy the guides from GNNGraphs and GNNlib
+dest_guides_dir = joinpath(@__DIR__, "src/other")
+gnngraphs_guides_dir = joinpath(@__DIR__, "../../GNNGraphs/docs/src/guides")
+gnnlib_guides_dir = joinpath(@__DIR__, "../../GNNlib/docs/src/guides")
+for file in readdir(gnngraphs_guides_dir)
+    cp(joinpath(gnngraphs_guides_dir, file), joinpath(dest_guides_dir, file), force=true)
+end
+for file in readdir(gnnlib_guides_dir)
+    cp(joinpath(gnnlib_guides_dir, file), joinpath(dest_guides_dir, file), force=true)
+end
+
 makedocs(;
-         modules = [GraphNeuralNetworks],
-         doctest = false,
-         clean = true,
-         plugins = [interlinks],
-         format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing),
-         sitename = "GraphNeuralNetworks.jl",
-         pages = ["Monorepo" => [
-                      "Home" => "index.md",
-                      "Developer guide" => "dev.md",
-                      "Google Summer of Code" => "gsoc.md",
-                  ],
-                  "GraphNeuralNetworks.jl" =>[
-                      "Home" => "home.md",
-                      "Models" => "models.md",],
+    modules = [GraphNeuralNetworks],
+    doctest = false, # TODO: enable doctest
+    clean = true,
+    plugins = [interlinks],
+    format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing),
+    sitename = "GraphNeuralNetworks.jl",
+    pages = [
+
+        "Home" => "index.md",
+
+        "Guides" => [
+            "Graphs" => ["other/gnngraph.md", "other/heterograph.md", "other/temporalgraph.md"],
+            "Message Passing" => "other/messagepassing.md",
+            "Models" => "guides/models.md",
+            "Datasets" => "other/datasets.md",
+        ],
-                  "API Reference" => [
-
-                      "Basic" => "api/basic.md",
-                      "Convolutional layers" => "api/conv.md",
-                      "Pooling layers" => "api/pool.md",
-                      "Temporal Convolutional layers" => "api/temporalconv.md",
"Hetero Convolutional layers" => "api/heteroconv.md", - - - ], - - ], - ) + "API Reference" => [ + "Basic" => "api/basic.md", + "Convolutional layers" => "api/conv.md", + "Pooling layers" => "api/pool.md", + "Temporal Convolutional layers" => "api/temporalconv.md", + "Hetero Convolutional layers" => "api/heteroconv.md", + ], + "Developer guide" => "dev.md", + ], +) deploydocs(;repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", devbranch = "master", dirname= "GraphNeuralNetworks") diff --git a/GraphNeuralNetworks/docs/src/datasets.md b/GraphNeuralNetworks/docs/src/datasets.md deleted file mode 100644 index 050f27b3c..000000000 --- a/GraphNeuralNetworks/docs/src/datasets.md +++ /dev/null @@ -1,5 +0,0 @@ -# Datasets - -GraphNeuralNetworks.jl doesn't come with its own datasets, but leverages those available in the Julia (and non-Julia) ecosystem. In particular, the [examples in the GraphNeuralNetworks.jl repository](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/examples) make use of the [MLDatasets.jl](https://github.com/JuliaML/MLDatasets.jl) package. There you will find common graph datasets such as Cora, PubMed, Citeseer, TUDataset and [many others](https://juliaml.github.io/MLDatasets.jl/dev/datasets/graphs/). - -GraphNeuralNetworks.jl provides the [`GNNGraphs.mldataset2gnngraph`](@ref) method for interfacing with MLDatasets.jl. \ No newline at end of file diff --git a/GraphNeuralNetworks/docs/src/gsoc.md b/GraphNeuralNetworks/docs/src/gsoc.md deleted file mode 100644 index a764f4dd7..000000000 --- a/GraphNeuralNetworks/docs/src/gsoc.md +++ /dev/null @@ -1,3 +0,0 @@ -# Graph Neural Networks - Summer of Code - -Potential candidates to Google Summer of Code's scholarships can find out about the available projects involving GraphNeuralNetworks.jl on the [dedicated page](https://julialang.org/jsoc/gsoc/gnn/) in the Julia Language website. diff --git a/GraphNeuralNetworks/docs/src/models.md b/GraphNeuralNetworks/docs/src/guides/models.md similarity index 100% rename from GraphNeuralNetworks/docs/src/models.md rename to GraphNeuralNetworks/docs/src/guides/models.md diff --git a/GraphNeuralNetworks/docs/src/home.md b/GraphNeuralNetworks/docs/src/home.md deleted file mode 100644 index fa28621a7..000000000 --- a/GraphNeuralNetworks/docs/src/home.md +++ /dev/null @@ -1,87 +0,0 @@ -# GraphNeuralNetworks - -GraphNeuralNetworks.jl is a graph neural network package based on the deep learning framework [Flux.jl](https://github.com/FluxML/Flux.jl). - -It provides a set of stateful graph convolutional layers and utilities to build graph neural networks. - -Among its features: - -* Implements common graph convolutional layers. -* Supports computations on batched graphs. -* Easy to define custom layers. -* CUDA support. -* Integration with [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl). -* [Examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/examples) of node, edge, and graph level machine learning tasks. -* Heterogeneous and temporal graphs. - - -## Package overview - -Let's give a brief overview of the package by solving a graph regression problem with synthetic data. - -Usage examples on real datasets can be found in the [examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/examples) folder. - -### Data preparation - -We create a dataset consisting in multiple random graphs and associated data features. 
-
-```julia
-using GraphNeuralNetworks, Flux, CUDA, Statistics, MLUtils
-using Flux: DataLoader
-
-all_graphs = GNNGraph[]
-
-for _ in 1:1000
-    g = rand_graph(10, 40,
-            ndata=(; x = randn(Float32, 16,10)),  # Input node features
-            gdata=(; y = randn(Float32)))         # Regression target
-    push!(all_graphs, g)
-end
-```
-
-### Model building
-
-We concisely define our model as a [`GraphNeuralNetworks.GNNChain`](@ref) containing two graph convolutional layers. If CUDA is available, our model will live on the gpu.
-
-```julia
-device = CUDA.functional() ? Flux.gpu : Flux.cpu;
-
-model = GNNChain(GCNConv(16 => 64),
-                 BatchNorm(64),     # Apply batch normalization on node features (nodes dimension is batch dimension)
-                 x -> relu.(x),
-                 GCNConv(64 => 64, relu),
-                 GlobalPool(mean),  # Aggregate node-wise features into graph-wise features
-                 Dense(64, 1)) |> device
-
-opt = Flux.setup(Adam(1f-4), model)
-```
-
-### Training
-
-Finally, we use a standard Flux training pipeline to fit our dataset.
-We use Flux's `DataLoader` to iterate over mini-batches of graphs
-that are glued together into a single `GNNGraph` using the `MLUtils.batch` method. This is what happens under the hood when creating a `DataLoader` with the
-`collate=true` option.
-
-```julia
-train_graphs, test_graphs = MLUtils.splitobs(all_graphs, at=0.8)
-
-train_loader = DataLoader(train_graphs,
-                          batchsize=32, shuffle=true, collate=true)
-test_loader = DataLoader(test_graphs,
-                         batchsize=32, shuffle=false, collate=true)
-
-loss(model, g::GNNGraph) = mean((vec(model(g, g.x)) - g.y).^2)
-
-loss(model, loader) = mean(loss(model, g |> device) for g in loader)
-
-for epoch in 1:100
-    for g in train_loader
-        g = g |> device
-        grad = gradient(model -> loss(model, g), model)
-        Flux.update!(opt, model, grad[1])
-    end
-
-    @info (; epoch, train_loss=loss(model, train_loader), test_loss=loss(model, test_loader))
-end
-```
diff --git a/GraphNeuralNetworks/docs/src/index.md b/GraphNeuralNetworks/docs/src/index.md
index 692347aea..d0500d3e1 100644
--- a/GraphNeuralNetworks/docs/src/index.md
+++ b/GraphNeuralNetworks/docs/src/index.md
@@ -1,43 +1,107 @@
-# GraphNeuralNetworks Monorepo
+# GraphNeuralNetworks
 
-This is the monorepository for the GraphNeuralNetworks project, bringing together all code into a unified structure to facilitate code sharing and reusability across different project components. It contains the following packages:
+GraphNeuralNetworks.jl is a graph neural network package based on the deep learning framework [Flux.jl](https://github.com/FluxML/Flux.jl).
 
-- `GraphNeuralNetwork.jl`: Package that contains stateful graph convolutional layers based on the machine learning framework [Flux.jl](https://fluxml.ai/Flux.jl/stable/). This is fronted package for Flux users. It depends on GNNlib.jl, GNNGraphs.jl, and Flux.jl packages.
+It provides a set of stateful graph convolutional layers and utilities to build graph neural networks.
 
-- `GNNLux.jl`: Package that contains stateless graph convolutional layers based on the machine learning framework [Lux.jl](https://lux.csail.mit.edu/stable/). This is fronted package for Lux users. It depends on GNNlib.jl, GNNGraphs.jl, and Lux.jl packages.
+Among its features:
 
-- `GNNlib.jl`: Package that contains the core graph neural network layers and utilities. It depends on GNNGraphs.jl and GNNlib.jl packages and serves for code base for GraphNeuralNetwork.jl and GNNLux.jl packages.
-
-- `GNNGraphs.jl`: Package that contains the graph data structures and helper functions for working with graph data. It depends on Graphs.jl package.
-
-Here is a schema of the dependencies between the packages:
-
-![Monorepo schema](assets/schema.png)
-
-
-Among its general features:
-
-* Implements common graph convolutional layers both in stateful and stateless form.
+* Implements common graph convolutional layers.
 * Supports computations on batched graphs.
 * Easy to define custom layers.
 * CUDA support.
 * Integration with [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl).
 * [Examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/examples) of node, edge, and graph level machine learning tasks.
-* Heterogeneous and temporal graphs.
+* Heterogeneous and temporal graphs.
+
+The package is part of a larger ecosystem of packages that includes [GNNlib.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/gnnlib), [GNNGraphs.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/gnngraphs), and [GNNLux.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/gnnlux).
+
+GraphNeuralNetworks.jl is the frontend package for Flux.jl users. [Lux.jl](https://lux.csail.mit.edu/stable/) users can instead rely on GNNLux.jl (still in development).
 
 ## Installation
 
-GraphNeuralNetworks.jl, GNNlib.jl and GNNGraphs.jl are a registered Julia packages. You can easily install a package, for example GraphNeuralNetworks.jl, through the package manager :
+GraphNeuralNetworks.jl is a registered Julia package. You can easily install it through the package manager:
 
 ```julia
 pkg> add GraphNeuralNetworks
 ```
 
-## Usage
+## Package overview
+
+Let's give a brief overview of the package by solving a graph regression problem with synthetic data.
+
+Other usage examples can be found in the [examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/examples) folder, in the [notebooks](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/notebooks) folder, and in the [tutorials](https://juliagraphs.org/GraphNeuralNetworks.jl/tutorials/) section of the documentation.
+
+### Data preparation
+
+We create a dataset consisting of multiple random graphs and associated data features.
+
+```julia
+using GraphNeuralNetworks, Flux, CUDA, Statistics, MLUtils
+using Flux: DataLoader
+
+all_graphs = GNNGraph[]
+
+for _ in 1:1000
+    g = rand_graph(10, 40,
+            ndata=(; x = randn(Float32, 16,10)),  # Input node features
+            gdata=(; y = randn(Float32)))         # Regression target
+    push!(all_graphs, g)
+end
+```
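+
+Each `g` above is a `GNNGraph` whose fields can be inspected directly. A quick sanity check (the commented values are an assumption following from the `rand_graph` arguments used above):
+
+```julia
+g = all_graphs[1]
+g.num_nodes  # 10
+g.num_edges  # 40
+size(g.x)    # (16, 10): one 16-dimensional feature vector per node
+g.y          # the scalar regression target
+```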
+
+### Model building
 
-Usage examples can be found in the [examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/examples) and in the [notebooks](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/notebooks) folder. Also, make sure to read the [documentation](https://juliagraphs.org/GraphNeuralNetworks.jl/graphneuralnetworks/) for a comprehensive introduction to the library and the [tutorials](https://juliagraphs.org/GraphNeuralNetworks.jl/tutorials/).
+We concisely define our model as a [`GraphNeuralNetworks.GNNChain`](@ref) containing two graph convolutional layers. If CUDA is available, our model will live on the gpu.
 
+```julia
+device = CUDA.functional() ? Flux.gpu : Flux.cpu;
+
+model = GNNChain(GCNConv(16 => 64),
+                 BatchNorm(64),     # Apply batch normalization on node features (nodes dimension is batch dimension)
+                 x -> relu.(x),
+                 GCNConv(64 => 64, relu),
+                 GlobalPool(mean),  # Aggregate node-wise features into graph-wise features
+                 Dense(64, 1)) |> device
+
+opt = Flux.setup(Adam(1f-4), model)
+```
+
+### Training
+
+Finally, we use a standard Flux training pipeline to fit our dataset.
+We use Flux's `DataLoader` to iterate over mini-batches of graphs
+that are glued together into a single `GNNGraph` using the `MLUtils.batch` method. This is what happens under the hood when creating a `DataLoader` with the
+`collate=true` option.
+
+```julia
+train_graphs, test_graphs = MLUtils.splitobs(all_graphs, at=0.8)
+
+train_loader = DataLoader(train_graphs,
+                          batchsize=32, shuffle=true, collate=true)
+test_loader = DataLoader(test_graphs,
+                         batchsize=32, shuffle=false, collate=true)
+
+loss(model, g::GNNGraph) = mean((vec(model(g, g.x)) - g.y).^2)
+
+loss(model, loader) = mean(loss(model, g |> device) for g in loader)
+
+for epoch in 1:100
+    for g in train_loader
+        g = g |> device
+        grad = gradient(model -> loss(model, g), model)
+        Flux.update!(opt, model, grad[1])
+    end
+
+    @info (; epoch, train_loss=loss(model, train_loader), test_loss=loss(model, test_loader))
+end
+```
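+
+After training, the model can be applied to new graphs exactly as it is inside the loss function above. A minimal inference sketch (the commented output shape is an assumption following from the `GlobalPool`/`Dense(64, 1)` head):
+
+```julia
+g_new = rand_graph(10, 40, ndata=(; x = randn(Float32, 16, 10))) |> device
+y_pred = model(g_new, g_new.x)  # 1×1 matrix: one graph-level prediction
+```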
+
+## Google Summer of Code
+
+Potential candidates for Google Summer of Code scholarships can find out about the available projects involving GraphNeuralNetworks.jl on the [dedicated page](https://julialang.org/jsoc/gsoc/gnn/) of the Julia Language website.
+
 ## Citing
 
 If you use GraphNeuralNetworks.jl in a scientific publication, we would appreciate the following reference:
@@ -57,7 +121,3 @@ GraphNeuralNetworks.jl is largely inspired by [PyTorch Geometric](https://pytorc
 and [GeometricFlux.jl](https://fluxml.ai/GeometricFlux.jl/stable/).
 
 
-
-
-
-
diff --git a/GraphNeuralNetworks/docs/src/other/dummy.md b/GraphNeuralNetworks/docs/src/other/dummy.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/GraphNeuralNetworks/test/layers/conv.jl b/GraphNeuralNetworks/test/layers/conv.jl
index 2167cf47a..5187d9b63 100644
--- a/GraphNeuralNetworks/test/layers/conv.jl
+++ b/GraphNeuralNetworks/test/layers/conv.jl
@@ -102,9 +102,20 @@ end
 
     l = ChebConv(D_IN => D_OUT, k)
     for g in TEST_GRAPHS
         has_isolated_nodes(g) && continue
-        g.graph isa AbstractSparseMatrix && continue
-        @test size(l(g, g.x)) == (D_OUT, g.num_nodes)
-        test_gradients(l, g, g.x, rtol = RTOL_LOW, test_gpu = true, compare_finite_diff = false)
+
+        broken = gpu_backend() == "AMDGPU"
+        @test size(l(g, g.x)) == (D_OUT, g.num_nodes) broken=broken
+
+        if gpu_backend() == "AMDGPU"
+            broken = true
+        elseif gpu_backend() == "CUDA" && get_graph_type(g) == :sparse
+            broken = true
+        else
+            broken = false
+        end
+        @test test_gradients(
+            l, g, g.x, rtol = RTOL_LOW, test_gpu = true, compare_finite_diff = false
+        ) broken=broken
     end
 end
diff --git a/GraphNeuralNetworks/test/test_module.jl b/GraphNeuralNetworks/test/test_module.jl
index fa50e5821..c3ed96f05 100644
--- a/GraphNeuralNetworks/test/test_module.jl
+++ b/GraphNeuralNetworks/test/test_module.jl
@@ -38,14 +38,19 @@ using SparseArrays
 
 # from Base
 export mean, randn, SparseArrays, AbstractSparseMatrix
 
-# from other packages
-export Flux, gradient, Dense, Chain, relu, random_regular_graph, erdos_renyi,
-       BatchNorm, LayerNorm, Dropout, Parallel
+# from Flux.jl
+export Flux, gradient, Dense, Chain, relu,
+       BatchNorm, LayerNorm, Dropout, Parallel,
+       gpu_device, cpu_device, get_device,
+       CPUDevice, CUDADevice, AMDGPUDevice, MetalDevice
+
+# from Graphs.jl
+export random_regular_graph, erdos_renyi
 
 # from this module
 export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS,
        test_gradients, finitediff_withgradient,
-       check_equal_leaves
+       check_equal_leaves, gpu_backend
 
 const D_IN = 3
@@ -178,5 +183,19 @@ TEST_GRAPHS = [generate_test_graphs(:coo)...,
                generate_test_graphs(:dense)...,
                generate_test_graphs(:sparse)...]
 
+
+function gpu_backend()
+    dev = gpu_device()
+    if dev isa CUDADevice
+        return "CUDA"
+    elseif dev isa AMDGPUDevice
+        return "AMDGPU"
+    elseif dev isa MetalDevice
+        return "Metal"
+    else
+        return "Unknown"
+    end
+end
+
 end # testmodule
 
diff --git a/docs/make-multi.jl b/docs/make-multi.jl
index 268a508d8..43a9ff626 100644
--- a/docs/make-multi.jl
+++ b/docs/make-multi.jl
@@ -15,17 +15,17 @@ docs = [
     MultiDocumenter.MultiDocRef(
         upstream = joinpath(dirname(@__DIR__),"GraphNeuralNetworks", "docs", "build"),
         path = "graphneuralnetworks",
-        name = "GraphNeuralNetworks",
+        name = "GraphNeuralNetworks.jl",
         fix_canonical_url = false),
     MultiDocumenter.MultiDocRef(
         upstream = joinpath(dirname(@__DIR__), "GNNGraphs", "docs", "build"),
         path = "gnngraphs",
-        name = "GNNGraphs",
+        name = "GNNGraphs.jl",
         fix_canonical_url = false),
     MultiDocumenter.MultiDocRef(
         upstream = joinpath(dirname(@__DIR__), "GNNlib", "docs", "build"),
         path = "gnnlib",
-        name = "GNNlib",
+        name = "GNNlib.jl",
        fix_canonical_url = false),
     MultiDocumenter.MultiDocRef(
         upstream = joinpath(dirname(@__DIR__), "GNNLux", "docs", "build"),