diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index ec69b388d..000000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Documentation - -on: - push: - branches: - - master - tags: '*' - pull_request: - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@latest - with: - version: '1.10.4' - - name: Install dependencies - run: julia --project=GraphNeuralNetworks/docs/ -e ' - using Pkg; - Pkg.develop([PackageSpec(path=joinpath(pwd(), "GraphNeuralNetworks")), - PackageSpec(path=joinpath(pwd(), "GNNGraphs")), - PackageSpec(path=joinpath(pwd(), "GNNlib"))]); - Pkg.instantiate();' - - name: Build and deploy - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token - DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} - run: julia --project=GraphNeuralNetworks/docs/ GraphNeuralNetworks/docs/make.jl diff --git a/.github/workflows/multidocs.yml b/.github/workflows/multidocs.yml new file mode 100644 index 000000000..3d2759538 --- /dev/null +++ b/.github/workflows/multidocs.yml @@ -0,0 +1,113 @@ +name: MultiDocumentation + +on: + push: + branches: + - master + tags: '*' + pull_request: + +jobs: + build_multidocs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 + with: + version: '1.10.5' + - uses: julia-actions/cache@v2 + + - name: Set up + run: git config --global init.defaultBranch master + + # Build GNNGraphs docs + - name: Install dependencies for GNNGraphs + run: + julia --project=GNNGraphs/docs/ -e ' + using Pkg; + Pkg.develop(PackageSpec(path=joinpath(pwd(), "GNNGraphs"))); + Pkg.instantiate();' + - name: Build GNNGraphs docs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + run: julia --project=GNNGraphs/docs/ GNNGraphs/docs/make.jl + + # Build GNNlib docs + - name: Install dependencies for GNNlib + run: julia --project=GNNlib/docs/ -e 'using Pkg; Pkg.instantiate();' + - name: Build GNNlib docs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + run: julia --project=GNNlib/docs/ GNNlib/docs/make.jl + + # Build GNNLux docs + - name: Install dependencies for GNNLux + run: julia --project=GNNLux/docs/ -e ' + using Pkg; + Pkg.develop(PackageSpec(path=joinpath(pwd(), "GNNLux"))); + Pkg.instantiate();' + - name: Build GNNLux docs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + run: julia --project=GNNLux/docs/ GNNLux/docs/make.jl + + # Build GraphNeuralNetworks docs + - name: Install dependencies for GraphNeuralNetworks + run: julia --project=GraphNeuralNetworks/docs/ -e ' + using Pkg; + Pkg.develop(PackageSpec(path=joinpath(pwd(), "GraphNeuralNetworks"))); + Pkg.instantiate();' + - name: Build GraphNeuralNetworks docs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + run: julia --project=GraphNeuralNetworks/docs/ GraphNeuralNetworks/docs/make.jl + + # Build multidocs + - name: Install dependencies for main docs + run: julia --project=GraphNeuralNetworks/docs/ -e ' + using Pkg; + Pkg.develop(PackageSpec(path=joinpath(pwd(), "GraphNeuralNetworks"))); + Pkg.instantiate();' + - name: Build main 
docs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + run: julia --project=GraphNeuralNetworks/docs/make.jl + + # Build tutorials + - name: Install dependencies for tutorials + run: julia --project=tutorials/docs/ -e 'using Pkg; Pkg.instantiate();' + - name: Build tutorials + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + run: julia --project=tutorials/docs/ tutorials/docs/make.jl + + # Build and deploy multidocs + - name: Install dependencies for multidocs + run: julia --project=docs/ -e ' + using Pkg; + Pkg.develop([PackageSpec(path=joinpath(pwd(), "GraphNeuralNetworks")), + PackageSpec(path=joinpath(pwd(), "GNNGraphs")), + PackageSpec(path=joinpath(pwd(), "GNNlib")), + PackageSpec(path=joinpath(pwd(), "GNNLux"))]); + Pkg.instantiate();' + - name: Check if objects.inv exists for GraphNeuralNetworks + run: | + if [ -f GraphNeuralNetworks/docs/build/objects.inv ]; then + echo "GraphNeuralNetworks: objects.inv exists." + else + echo "GraphNeuralNetworks: objects.inv does not exist!" && exit 1 + fi + - name: Build and deploy multidocs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + run: | + git config user.name github-actions + git config user.email github-actions@github.com + julia --project=docs/ docs/make-multi.jl \ No newline at end of file diff --git a/.gitignore b/.gitignore index 91820619c..b40b4d732 100644 --- a/.gitignore +++ b/.gitignore @@ -5,9 +5,14 @@ *.swp *.swo Manifest.toml -/docs/build/ .vscode LocalPreferences.toml .DS_Store docs/src/democards/gridtheme.css -test.jl \ No newline at end of file +test.jl +docs/build +GNNGraphs/docs/build +GNNlib/docs/build +GNNLux/docs/build +GraphNeuralNetworks/docs/build +tutorials/docs/build \ No newline at end of file diff --git a/GNNGraphs/docs/Project.toml b/GNNGraphs/docs/Project.toml new file mode 100644 index 000000000..c26fcc9b2 --- /dev/null +++ b/GNNGraphs/docs/Project.toml @@ -0,0 +1,6 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +DocumenterInterLinks = "d12716ef-a0f6-4df4-a9f1-a5a34e75c656" +GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c" +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" +LiveServer = "16fef848-5104-11e9-1b77-fb7a48bbb589" diff --git a/GNNGraphs/docs/make.jl b/GNNGraphs/docs/make.jl new file mode 100644 index 000000000..2fc0748c8 --- /dev/null +++ b/GNNGraphs/docs/make.jl @@ -0,0 +1,32 @@ +using Documenter +using DocumenterInterLinks +using GNNGraphs +import Graphs +using Graphs: induced_subgraph + +assets=[] +prettyurls = get(ENV, "CI", nothing) == "true" +mathengine = MathJax3() + + +makedocs(; + modules = [GNNGraphs], + doctest = false, + clean = true, + format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing), + sitename = "GNNGraphs.jl", + pages = ["Home" => "index.md", + "Graphs" => ["gnngraph.md", "heterograph.md", "temporalgraph.md"], + "Datasets" => "datasets.md", + "API Reference" => [ + "GNNGraph" => "api/gnngraph.md", + "GNNHeteroGraph" => "api/heterograph.md", + "TemporalSnapshotsGNNGraph" => "api/temporalgraph.md", + ], + ] + ) + +deploydocs(;repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", +devbranch = "master", +push_preview = true, +dirname = "GNNGraphs") \ No newline at end of file diff --git 
a/GraphNeuralNetworks/docs/src/api/gnngraph.md b/GNNGraphs/docs/src/api/gnngraph.md similarity index 92% rename from GraphNeuralNetworks/docs/src/api/gnngraph.md rename to GNNGraphs/docs/src/api/gnngraph.md index f708c3840..088d059a4 100644 --- a/GraphNeuralNetworks/docs/src/api/gnngraph.md +++ b/GNNGraphs/docs/src/api/gnngraph.md @@ -4,7 +4,7 @@ CurrentModule = GNNGraphs # GNNGraph -Documentation page for the graph type `GNNGraph` provided by GraphNeuralNetworks.jl and related methods. +Documentation page for the graph type `GNNGraph` provided by GNNGraphs.jl and related methods. Besides the methods documented here, one can rely on the large set of functionalities given by [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl) thanks to the fact diff --git a/GNNGraphs/docs/src/api/heterograph.md b/GNNGraphs/docs/src/api/heterograph.md new file mode 100644 index 000000000..3734d757b --- /dev/null +++ b/GNNGraphs/docs/src/api/heterograph.md @@ -0,0 +1,17 @@ +# Heterogeneous Graphs + + +## GNNHeteroGraph +Documentation page for the type `GNNHeteroGraph` representing heterogeneous graphs, where nodes and edges can have different types. + + +```@autodocs +Modules = [GNNGraphs] +Pages = ["gnnheterograph.jl"] +Private = false +``` + +```@docs +Graphs.has_edge(::GNNHeteroGraph, ::Tuple{Symbol, Symbol, Symbol}, ::Integer, ::Integer) +``` + diff --git a/GraphNeuralNetworks/docs/src/api/temporalgraph.md b/GNNGraphs/docs/src/api/temporalgraph.md similarity index 100% rename from GraphNeuralNetworks/docs/src/api/temporalgraph.md rename to GNNGraphs/docs/src/api/temporalgraph.md diff --git a/GNNGraphs/docs/src/datasets.md b/GNNGraphs/docs/src/datasets.md new file mode 100644 index 000000000..60477d95e --- /dev/null +++ b/GNNGraphs/docs/src/datasets.md @@ -0,0 +1,10 @@ +# Datasets + +GNNGraphs.jl doesn't come with its own datasets, but leverages those available in the Julia (and non-Julia) ecosystem. In particular, the [examples in the GraphNeuralNetworks.jl repository](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/examples) make use of the [MLDatasets.jl](https://github.com/JuliaML/MLDatasets.jl) package. There you will find common graph datasets such as Cora, PubMed, Citeseer, TUDataset and [many others](https://juliaml.github.io/MLDatasets.jl/dev/datasets/graphs/). +For graphs with static structures and temporal features, datasets such as METRLA, PEMSBAY, ChickenPox, and WindMillEnergy are available. For graphs featuring both temporal structures and temporal features, the TemporalBrains dataset is suitable. + +GraphNeuralNetworks.jl provides the [`mldataset2gnngraph`](@ref) method for interfacing with MLDatasets.jl. + +```@docs +mldataset2gnngraph +``` diff --git a/GraphNeuralNetworks/docs/src/gnngraph.md b/GNNGraphs/docs/src/gnngraph.md similarity index 95% rename from GraphNeuralNetworks/docs/src/gnngraph.md rename to GNNGraphs/docs/src/gnngraph.md index cfa3a2008..c62e279fa 100644 --- a/GraphNeuralNetworks/docs/src/gnngraph.md +++ b/GNNGraphs/docs/src/gnngraph.md @@ -1,6 +1,6 @@ -# Working with GNNGraph +# Static Graphs -The fundamental graph type in GraphNeuralNetworks.jl is the [`GNNGraph`](@ref). +The fundamental graph type in GNNGraphs.jl is the [`GNNGraph`](@ref). A GNNGraph `g` is a directed graph with nodes labeled from 1 to `g.num_nodes`. The underlying implementation allows for efficient application of graph neural network operators, gpu movement, and storage of node/edge/graph related feature arrays. 
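For orientation, here is a minimal sketch (not part of the diff) of constructing a `GNNGraph` with node features through the documented `GNNGraphs` constructor; the edge list and feature sizes below are made up for illustration:

```julia
using GNNGraphs

# A small directed graph given as a COO edge list (made-up example data).
src = [1, 2, 3, 3]
dst = [2, 3, 1, 4]

# Attach a 16-dimensional feature vector to each of the 4 nodes.
g = GNNGraph(src, dst; ndata = (; x = rand(Float32, 16, 4)))

g.num_nodes   # 4
g.num_edges   # 4
g.ndata.x     # 16×4 node feature matrix
```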
@@ -12,7 +12,7 @@ therefore it supports most functionality from that library. A GNNGraph can be created from several different data sources encoding the graph topology: ```julia -using GraphNeuralNetworks, Graphs, SparseArrays +using GNNGraphs, Graphs, SparseArrays # Construct a GNNGraph from from a Graphs.jl's graph @@ -124,7 +124,7 @@ g′.e ## Edge weights It is common to denote scalar edge features as edge weights. The `GNNGraph` has specific support -for edge weights: they can be stored as part of internal representations of the graph (COO or adjacency matrix). Some graph convolutional layers, most notably the [`GCNConv`](@ref), can use the edge weights to perform weighted sums over the nodes' neighborhoods. +for edge weights: they can be stored as part of internal representations of the graph (COO or adjacency matrix). Some graph convolutional layers, most notably the `GCNConv`, can use the edge weights to perform weighted sums over the nodes' neighborhoods. ```julia julia> source = [1, 1, 2, 2, 3, 3]; @@ -233,7 +233,7 @@ Moreover, a `GNNGraph` can be easily constructed from a `Graphs.Graph` or a `Gra ```julia julia> import Graphs -julia> using GraphNeuralNetworks +julia> using GNNGraphs # A Graphs.jl undirected graph julia> gu = Graphs.erdos_renyi(10, 20) diff --git a/GraphNeuralNetworks/docs/src/heterograph.md b/GNNGraphs/docs/src/heterograph.md similarity index 96% rename from GraphNeuralNetworks/docs/src/heterograph.md rename to GNNGraphs/docs/src/heterograph.md index c05b33943..2347b5844 100644 --- a/GraphNeuralNetworks/docs/src/heterograph.md +++ b/GNNGraphs/docs/src/heterograph.md @@ -6,7 +6,7 @@ Relations such as `:rate` or `:like` can connect nodes of different types. We ca Different node/edge types can store different groups of features and this makes heterographs a very flexible modeling tools -and data containers. In GraphNeuralNetworks.jl heterographs are implemented in +and data containers. In GNNGraphs.jl heterographs are implemented in the type [`GNNHeteroGraph`](@ref). @@ -137,4 +137,4 @@ end ## Graph convolutions on heterographs -See [`HeteroGraphConv`](@ref) for how to perform convolutions on heterogeneous graphs. +See `HeteroGraphConv` for how to perform convolutions on heterogeneous graphs. diff --git a/GNNGraphs/docs/src/index.md b/GNNGraphs/docs/src/index.md new file mode 100644 index 000000000..fc64196cb --- /dev/null +++ b/GNNGraphs/docs/src/index.md @@ -0,0 +1,15 @@ +# GNNGraphs.jl + +GNNGraphs.jl is a package that provides graph data structures and helper functions specifically designed for working with graph neural networks. This package allows to store not only the graph structure, but also features associated with nodes, edges, and the graph itself. It is the core foundation for the GNNlib, GraphNeuralNetworks, and GNNLux packages. + +It supports three types of graphs: + +- **Static graph** is the basic graph type represented by [`GNNGraph`](@ref), where each node and edge can have associated features. This type of graph is used in typical graph neural network applications, where neural networks operate on both the structure of the graph and the features stored in it. It can be used to represent a graph where the structure does not change over time, but the features of the nodes and edges can change over time. + +- **Heterogeneous graph** is a graph that supports multiple types of nodes and edges, and is represented by [`GNNHeteroGraph`](@ref). Each type can have its own properties and relationships. 
This is useful in scenarios with different entities and interactions, such as in citation graphs or multi-relational data. + +- **Temporal graph** is a graph that changes over time, and is represented by [`TemporalSnapshotsGNNGraph`](@ref). Edges and features can change dynamically. This type of graph is useful for applications that involve tracking time-dependent relationships, such as social networks. + + + +This package depends on the package [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl). \ No newline at end of file diff --git a/GraphNeuralNetworks/docs/src/temporalgraph.md b/GNNGraphs/docs/src/temporalgraph.md similarity index 89% rename from GraphNeuralNetworks/docs/src/temporalgraph.md rename to GNNGraphs/docs/src/temporalgraph.md index f90b73ff4..560cfa8d6 100644 --- a/GraphNeuralNetworks/docs/src/temporalgraph.md +++ b/GNNGraphs/docs/src/temporalgraph.md @@ -1,6 +1,6 @@ # Temporal Graphs -Temporal Graphs are graphs with time varying topologies and node features. In GraphNeuralNetworks.jl temporal graphs with fixed number of nodes over time are supported by the [`TemporalSnapshotsGNNGraph`](@ref) type. +Temporal graphs are graphs with time-varying topologies and features. In GNNGraphs.jl, temporal graphs with a fixed number of nodes over time are supported by the [`TemporalSnapshotsGNNGraph`](@ref) type. ## Creating a TemporalSnapshotsGNNGraph @@ -91,7 +91,7 @@ GNNGraph: ``` ## Data Features -A temporal graph can stode global feautre for the entire time series in the `tgdata` filed. +A temporal graph can store global features for the entire time series in the `tgdata` field. Also, each snapshot can store node, edge, and graph features in the `ndata`, `edata`, and `gdata` fields, respectively. ```jldoctest @@ -126,10 +126,10 @@ julia> [g.x for g in tg.snapshots]; # same vector as above, now accessing ## Graph convolutions on TemporalSnapshotsGNNGraph -A graph convolutional layer can be applied to each snapshot independently, in the next example we apply a `GINConv` layer to each snapshot of a `TemporalSnapshotsGNNGraph`. The list of compatible graph convolution layers can be found [here](api/conv.md). +A graph convolutional layer can be applied to each snapshot independently. In the next example we apply a `GINConv` layer to each snapshot of a `TemporalSnapshotsGNNGraph`.
```jldoctest -julia> using GraphNeuralNetworks, Flux +julia> using GNNGraphs, Flux julia> snapshots = [rand_graph(10, 20; ndata = rand(3, 10)), rand_graph(10, 14; ndata = rand(3, 10))]; diff --git a/GNNLux/docs/Project.toml b/GNNLux/docs/Project.toml new file mode 100644 index 000000000..dbb31551d --- /dev/null +++ b/GNNLux/docs/Project.toml @@ -0,0 +1,5 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +GNNLux = "e8545f4d-a905-48ac-a8c4-ca114b98986d" +GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48" +LiveServer = "16fef848-5104-11e9-1b77-fb7a48bbb589" diff --git a/GNNLux/docs/make.jl b/GNNLux/docs/make.jl new file mode 100644 index 000000000..0914a3f9f --- /dev/null +++ b/GNNLux/docs/make.jl @@ -0,0 +1,28 @@ +using Documenter +using GNNlib +using GNNLux + + + +assets=[] +prettyurls = get(ENV, "CI", nothing) == "true" +mathengine = MathJax3() + + +makedocs(; + modules = [GNNLux], + doctest = false, + clean = true, + format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing), + sitename = "GNNLux.jl", + pages = ["Home" => "index.md", + "Basic" => "api/basic.md"], + ) + + + + +deploydocs(;repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", +devbranch = "master", +push_preview = true, +dirname = "GNNLux") \ No newline at end of file diff --git a/GNNLux/docs/src/api/basic.md b/GNNLux/docs/src/api/basic.md new file mode 100644 index 000000000..2242745d6 --- /dev/null +++ b/GNNLux/docs/src/api/basic.md @@ -0,0 +1,8 @@ +```@meta +CurrentModule = GNNLux +``` + +## GNNLayer +```@docs +GNNLux.GNNLayer +``` \ No newline at end of file diff --git a/GNNLux/docs/src/index.md b/GNNLux/docs/src/index.md new file mode 100644 index 000000000..6fa95c3ad --- /dev/null +++ b/GNNLux/docs/src/index.md @@ -0,0 +1,5 @@ +# GNNLux.jl + +GNNLux.jl is a work-in-progress package that implements stateless graph convolutional layers, fully compatible with the [Lux.jl](https://lux.csail.mit.edu/stable/) machine learning framework. It is built on top of the GNNGraphs.jl, GNNlib.jl, and Lux.jl packages. + +The full documentation will be available soon. \ No newline at end of file diff --git a/GNNLux/src/layers/basic.jl b/GNNLux/src/layers/basic.jl index ba12de728..6b4763459 100644 --- a/GNNLux/src/layers/basic.jl +++ b/GNNLux/src/layers/basic.jl @@ -4,7 +4,7 @@ An abstract type from which graph neural network layers are derived. It is Derived from Lux's `AbstractLuxLayer` type. -See also [`GNNChain`](@ref GNNLux.GNNChain). +See also `GNNChain`. 
""" abstract type GNNLayer <: AbstractLuxLayer end diff --git a/GNNlib/docs/Project.toml b/GNNlib/docs/Project.toml new file mode 100644 index 000000000..5aa458b91 --- /dev/null +++ b/GNNlib/docs/Project.toml @@ -0,0 +1,7 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +DocumenterInterLinks = "d12716ef-a0f6-4df4-a9f1-a5a34e75c656" +GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c" +GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48" +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" +LiveServer = "16fef848-5104-11e9-1b77-fb7a48bbb589" diff --git a/GNNlib/docs/make.jl b/GNNlib/docs/make.jl new file mode 100644 index 000000000..ff57a33b6 --- /dev/null +++ b/GNNlib/docs/make.jl @@ -0,0 +1,42 @@ +using Documenter +using GNNlib +using GNNGraphs +using DocumenterInterLinks + + +assets=[] +prettyurls = get(ENV, "CI", nothing) == "true" +mathengine = MathJax3() + +interlinks = InterLinks( + "NNlib" => "https://fluxml.ai/NNlib.jl/stable/", + "GNNGraphs" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNGraphs/", joinpath(dirname(dirname(@__DIR__)), "GNNGraphs", "docs", "build", "objects.inv"))) + + +makedocs(; + modules = [GNNlib], + doctest = false, + clean = true, + plugins = [interlinks], + format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing), + sitename = "GNNlib.jl", + pages = ["Home" => "index.md", + "Message Passing" => "messagepassing.md", + + "API Reference" => [ + + "Message Passing" => "api/messagepassing.md", + + "Utils" => "api/utils.md", + ] + + ] + ) + + + + +deploydocs(;repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", +devbranch = "master", +push_preview = true, +dirname = "GNNlib") \ No newline at end of file diff --git a/GraphNeuralNetworks/docs/src/api/messagepassing.md b/GNNlib/docs/src/api/messagepassing.md similarity index 91% rename from GraphNeuralNetworks/docs/src/api/messagepassing.md rename to GNNlib/docs/src/api/messagepassing.md index aba1e0bba..03b50914e 100644 --- a/GraphNeuralNetworks/docs/src/api/messagepassing.md +++ b/GNNlib/docs/src/api/messagepassing.md @@ -1,5 +1,5 @@ ```@meta -CurrentModule = GraphNeuralNetworks +CurrentModule = GNNlib ``` # Message Passing diff --git a/GraphNeuralNetworks/docs/src/api/utils.md b/GNNlib/docs/src/api/utils.md similarity index 66% rename from GraphNeuralNetworks/docs/src/api/utils.md rename to GNNlib/docs/src/api/utils.md index 69a723874..c34861167 100644 --- a/GraphNeuralNetworks/docs/src/api/utils.md +++ b/GNNlib/docs/src/api/utils.md @@ -1,5 +1,5 @@ ```@meta -CurrentModule = GraphNeuralNetworks +CurrentModule = GNNlib ``` # Utility Functions @@ -17,18 +17,18 @@ Pages = ["utils.md"] ### Graph-wise operations ```@docs -GraphNeuralNetworks.reduce_nodes -GraphNeuralNetworks.reduce_edges -GraphNeuralNetworks.softmax_nodes -GraphNeuralNetworks.softmax_edges -GraphNeuralNetworks.broadcast_nodes -GraphNeuralNetworks.broadcast_edges +reduce_nodes +reduce_edges +softmax_nodes +softmax_edges +broadcast_nodes +broadcast_edges ``` ### Neighborhood operations ```@docs -GraphNeuralNetworks.softmax_edge_neighbors +softmax_edge_neighbors ``` ### NNlib diff --git a/GNNlib/docs/src/index.md b/GNNlib/docs/src/index.md new file mode 100644 index 000000000..d1668b933 --- /dev/null +++ b/GNNlib/docs/src/index.md @@ -0,0 +1,6 @@ +# GNNlib.jl + +GNNlib.jl is a package that provides the implementation of the basic message passing functions and +functional implementation of graph convolutional layers, which are used to build graph neural networks in both the Flux.jl and Lux.jl 
machine learning frameworks, created in the GraphNeuralNetworks.jl and GNNLux.jl packages, respectively. + +This package depends on GNNGraphs.jl and NNlib.jl, and is primarily intended for developers looking to create new GNN architectures. For most users, the higher-level GraphNeuralNetworks.jl and GNNLux.jl packages are recommended. \ No newline at end of file diff --git a/GraphNeuralNetworks/docs/src/messagepassing.md b/GNNlib/docs/src/messagepassing.md similarity index 91% rename from GraphNeuralNetworks/docs/src/messagepassing.md rename to GNNlib/docs/src/messagepassing.md index 7051d6cb4..954fb9dd2 100644 --- a/GraphNeuralNetworks/docs/src/messagepassing.md +++ b/GNNlib/docs/src/messagepassing.md @@ -16,7 +16,7 @@ and to ``\gamma_x`` and ``\gamma_e`` as to the node update and edge update funct respectively. The aggregation ``\square`` is over the neighborhood ``N(i)`` of node ``i``, and it is usually equal either to ``\sum``, to `max` or to a `mean` operation. -In GraphNeuralNetworks.jl, the message passing mechanism is exposed by the [`propagate`](@ref) function. +In GNNlib.jl, the message passing mechanism is exposed by the [`propagate`](@ref) function. [`propagate`](@ref) takes care of materializing the node features on each edge, applying the message function, performing the aggregation, and returning ``\bar{\mathbf{m}}``. It is then left to the user to perform further node and edge updates, @@ -39,7 +39,7 @@ and [`NNlib.scatter`](@ref) methods. The function [`apply_edges`](@ref) can be used to broadcast node data on each edge and produce new edge data. ```julia -julia> using GraphNeuralNetworks, Graphs, Statistics +julia> using GNNlib, Graphs, Statistics julia> g = rand_graph(10, 20) GNNGraph: @@ -90,9 +90,9 @@ julia> degree(g) 1 ``` -### Implementing a custom Graph Convolutional Layer +### Implementing a custom Graph Convolutional Layer using Flux.jl -Let's implement a simple graph convolutional layer using the message passing framework. +Let's implement a simple graph convolutional layer using the message passing framework using the machine learning framework Flux.jl. The convolution reads ```math @@ -134,7 +134,7 @@ function (l::GCN)(g::GNNGraph, x::AbstractMatrix{T}) where T end ``` -See the [`GATConv`](@ref) implementation [here](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/blob/master/src/layers/conv.jl) for a more complex example. +See the `GATConv` implementation [here](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/blob/master/src/layers/conv.jl) for a more complex example. 
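As a quick check that the custom layer above behaves like any other Flux layer, here is a hedged usage sketch; it assumes the `GCN(in => out, σ)` constructor from the full page, which is not shown in this hunk, and uses made-up dimensions:

```julia
using GNNGraphs, Flux   # Flux supplies `relu`; GNNGraphs supplies `rand_graph`

g = rand_graph(10, 30)            # 10 nodes, 30 edges
x = rand(Float32, 3, 10)          # 3 input features per node

l = GCN(3 => 5, relu)             # assumed constructor defined earlier on this page
y = l(g, x)                       # 5×10 matrix of output node features
```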
## Built-in message functions diff --git a/GraphNeuralNetworks/docs/Project.toml b/GraphNeuralNetworks/docs/Project.toml index 60f0e00d0..2f8dc9ee8 100644 --- a/GraphNeuralNetworks/docs/Project.toml +++ b/GraphNeuralNetworks/docs/Project.toml @@ -1,22 +1,4 @@ [deps] -DemoCards = "311a05b2-6137-4a5a-b473-18580a3d38b5" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" DocumenterInterLinks = "d12716ef-a0f6-4df4-a9f1-a5a34e75c656" -Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" -GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c" -GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48" GraphNeuralNetworks = "cffab07f-9bc2-4db1-8861-388f63bf7694" -Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" -NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -Pluto = "c3e4b0f8-55cb-11ea-2926-15256bba5781" -PlutoStaticHTML = "359b1769-a58e-495b-9770-312e911026ad" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" - -[compat] -DemoCards = "0.5.0" -Documenter = "1.5" diff --git a/GraphNeuralNetworks/docs/make.jl b/GraphNeuralNetworks/docs/make.jl index 1f0cb8bbc..56af7ce42 100644 --- a/GraphNeuralNetworks/docs/make.jl +++ b/GraphNeuralNetworks/docs/make.jl @@ -1,58 +1,54 @@ +using Documenter using GraphNeuralNetworks -using GNNGraphs -using Flux -using NNlib -using Graphs -using SparseArrays -using Pluto, PlutoStaticHTML # for tutorials -using Documenter, DemoCards using DocumenterInterLinks -tutorials, tutorials_cb, tutorial_assets = makedemos("tutorials") -assets = [] -isnothing(tutorial_assets) || push!(assets, tutorial_assets) +assets=[] +prettyurls = get(ENV, "CI", nothing) == "true" +mathengine = MathJax3() interlinks = InterLinks( "NNlib" => "https://fluxml.ai/NNlib.jl/stable/", - "Graphs" => "https://juliagraphs.org/Graphs.jl/stable/") - - -DocMeta.setdocmeta!(GraphNeuralNetworks, :DocTestSetup, - :(using GraphNeuralNetworks, Graphs, SparseArrays, NNlib, Flux); - recursive = true) - -prettyurls = get(ENV, "CI", nothing) == "true" -mathengine = MathJax3() + "GNNGraphs" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNGraphs/", joinpath(dirname(dirname(@__DIR__)), "GNNGraphs", "docs", "build", "objects.inv")), + "GNNlib" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNlib/", joinpath(dirname(dirname(@__DIR__)), "GNNlib", "docs", "build", "objects.inv")) + + ) makedocs(; - modules = [GraphNeuralNetworks, GNNGraphs, GNNlib], + modules = [GraphNeuralNetworks], doctest = false, clean = true, plugins = [interlinks], format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing), sitename = "GraphNeuralNetworks.jl", - pages = ["Home" => "index.md", - "Graphs" => ["gnngraph.md", "heterograph.md", "temporalgraph.md"], - "Message Passing" => "messagepassing.md", - "Model Building" => "models.md", - "Datasets" => "datasets.md", - "Tutorials" => tutorials, - "API Reference" => [ - "GNNGraph" => "api/gnngraph.md", - "Basic Layers" => "api/basic.md", - "Convolutional Layers" => "api/conv.md", - "Pooling Layers" => "api/pool.md", - "Message Passing" => "api/messagepassing.md", - "Heterogeneous Graphs" => "api/heterograph.md", - "Temporal Graphs" => "api/temporalgraph.md", - "Samplers" => "api/samplers.md", - "Utils" => "api/utils.md", - ], - "Developer Notes" => "dev.md", - "Summer Of Code" => "gsoc.md", - ]) - 
-tutorials_cb() - -deploydocs(repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git") + pages = ["Monorepo" => [ + "Home" => "index.md", + "Developer guide" => "dev.md", + "Google Summer of Code" => "gsoc.md", + ], + "GraphNeuralNetworks.jl" =>[ + "Home" => "home.md", + "Models" => "models.md",], + + "API Reference" => [ + + "Basic" => "api/basic.md", + "Convolutional layers" => "api/conv.md", + "Pooling layers" => "api/pool.md", + "Temporal Convolutional layers" => "api/temporalconv.md", + "Hetero Convolutional layers" => "api/heteroconv.md", + "Samplers" => "api/samplers.md", + + + ], + + ], + ) + + + + +deploydocs(;repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", +devbranch = "master", +push_preview = true, +dirname= "GraphNeuralNetworks") diff --git a/GraphNeuralNetworks/docs/src/api/heteroconv.md b/GraphNeuralNetworks/docs/src/api/heteroconv.md new file mode 100644 index 000000000..969fbde71 --- /dev/null +++ b/GraphNeuralNetworks/docs/src/api/heteroconv.md @@ -0,0 +1,15 @@ +```@meta +CurrentModule = GraphNeuralNetworks +``` + +# Hetero Graph-Convolutional Layers + +Heterogeneous graph convolutions are implemented in the type `HeteroGraphConv`. `HeteroGraphConv` relies on standard graph convolutional layers to perform message passing on the different relations. + +## Docs + +```@autodocs +Modules = [GraphNeuralNetworks] +Pages = ["layers/heteroconv.jl"] +Private = false +``` diff --git a/GraphNeuralNetworks/docs/src/api/heterograph.md b/GraphNeuralNetworks/docs/src/api/heterograph.md deleted file mode 100644 index db03c74a4..000000000 --- a/GraphNeuralNetworks/docs/src/api/heterograph.md +++ /dev/null @@ -1,25 +0,0 @@ -# Hetereogeneous Graphs - - -## GNNHeteroGraph -Documentation page for the type `GNNHeteroGraph` representing heterogeneous graphs, where nodes and edges can have different types. - - -```@autodocs -Modules = [GNNGraphs] -Pages = ["gnnheterograph.jl"] -Private = false -``` - -```@docs -Graphs.has_edge(::GNNHeteroGraph, ::Tuple{Symbol, Symbol, Symbol}, ::Integer, ::Integer) -``` - -## Heterogeneous Graph Convolutions - -Heterogeneous graph convolutions are implemented in the type [`HeteroGraphConv`](@ref). -`HeteroGraphConv` relies on standard graph convolutional layers to perform message passing on the different relations. See the table at [this page](https://carlolucibello.github.io/GraphNeuralNetworks.jl/dev/api/conv/) for the supported layers. - -```@docs -HeteroGraphConv -``` \ No newline at end of file diff --git a/GraphNeuralNetworks/docs/src/assets/schema.png b/GraphNeuralNetworks/docs/src/assets/schema.png new file mode 100644 index 000000000..6496b36df Binary files /dev/null and b/GraphNeuralNetworks/docs/src/assets/schema.png differ diff --git a/GraphNeuralNetworks/docs/src/datasets.md b/GraphNeuralNetworks/docs/src/datasets.md index 8644509c3..050f27b3c 100644 --- a/GraphNeuralNetworks/docs/src/datasets.md +++ b/GraphNeuralNetworks/docs/src/datasets.md @@ -2,8 +2,4 @@ GraphNeuralNetworks.jl doesn't come with its own datasets, but leverages those available in the Julia (and non-Julia) ecosystem. In particular, the [examples in the GraphNeuralNetworks.jl repository](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/examples) make use of the [MLDatasets.jl](https://github.com/JuliaML/MLDatasets.jl) package. There you will find common graph datasets such as Cora, PubMed, Citeseer, TUDataset and [many others](https://juliaml.github.io/MLDatasets.jl/dev/datasets/graphs/). 
-GraphNeuralNetworks.jl provides the [`mldataset2gnngraph`](@ref) method for interfacing with MLDatasets.jl. - -```@docs -mldataset2gnngraph -``` +GraphNeuralNetworks.jl provides the [`GNNGraphs.mldataset2gnngraph`](@ref) method for interfacing with MLDatasets.jl. \ No newline at end of file diff --git a/GraphNeuralNetworks/docs/src/home.md b/GraphNeuralNetworks/docs/src/home.md new file mode 100644 index 000000000..2ccebefd0 --- /dev/null +++ b/GraphNeuralNetworks/docs/src/home.md @@ -0,0 +1,87 @@ +# GraphNeuralNetworks + +This is the documentation page for [GraphNeuralNetworks.jl](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl), a graph neural network library written in Julia and based on the deep learning framework [Flux.jl](https://github.com/FluxML/Flux.jl). +GraphNeuralNetworks.jl is largely inspired by [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/), [Deep Graph Library](https://docs.dgl.ai/), +and [GeometricFlux.jl](https://fluxml.ai/GeometricFlux.jl/stable/). + +Among its features: + +* Implements common graph convolutional layers. +* Supports computations on batched graphs. +* Easy to define custom layers. +* CUDA support. +* Integration with [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl). +* [Examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/examples) of node, edge, and graph level machine learning tasks. + + +## Package overview + +Let's give a brief overview of the package by solving a +graph regression problem with synthetic data. + +Usage examples on real datasets can be found in the [examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/examples) folder. + +### Data preparation + +We create a dataset consisting in multiple random graphs and associated data features. + +```julia +using GraphNeuralNetworks, Graphs, Flux, CUDA, Statistics, MLUtils +using Flux: DataLoader + +all_graphs = GNNGraph[] + +for _ in 1:1000 + g = rand_graph(10, 40, + ndata=(; x = randn(Float32, 16,10)), # input node features + gdata=(; y = randn(Float32))) # regression target + push!(all_graphs, g) +end +``` + +### Model building + +We concisely define our model as a [`GraphNeuralNetworks.GNNChain`](@ref) containing two graph convolutional layers. If CUDA is available, our model will live on the gpu. + +```julia +device = CUDA.functional() ? Flux.gpu : Flux.cpu; + +model = GNNChain(GCNConv(16 => 64), + BatchNorm(64), # Apply batch normalization on node features (nodes dimension is batch dimension) + x -> relu.(x), + GCNConv(64 => 64, relu), + GlobalPool(mean), # aggregate node-wise features into graph-wise features + Dense(64, 1)) |> device + +opt = Flux.setup(Adam(1f-4), model) +``` + +### Training + +Finally, we use a standard Flux training pipeline to fit our dataset. +We use Flux's `DataLoader` to iterate over mini-batches of graphs +that are glued together into a single `GNNGraph` using the `MLUtils.batch` method. This is what happens under the hood when creating a `DataLoader` with the +`collate=true` option. 
+ +```julia +train_graphs, test_graphs = MLUtils.splitobs(all_graphs, at=0.8) + +train_loader = DataLoader(train_graphs, + batchsize=32, shuffle=true, collate=true) +test_loader = DataLoader(test_graphs, + batchsize=32, shuffle=false, collate=true) + +loss(model, g::GNNGraph) = mean((vec(model(g, g.x)) - g.y).^2) + +loss(model, loader) = mean(loss(model, g |> device) for g in loader) + +for epoch in 1:100 + for g in train_loader + g = g |> device + grad = gradient(model -> loss(model, g), model) + Flux.update!(opt, model, grad[1]) + end + + @info (; epoch, train_loss=loss(model, train_loader), test_loss=loss(model, test_loader)) +end +``` diff --git a/GraphNeuralNetworks/docs/src/index.md b/GraphNeuralNetworks/docs/src/index.md index d32f75359..39413eef8 100644 --- a/GraphNeuralNetworks/docs/src/index.md +++ b/GraphNeuralNetworks/docs/src/index.md @@ -1,10 +1,10 @@ -# GraphNeuralNetworks +# GraphNeuralNetworks Monorepo This is the documentation page for [GraphNeuralNetworks.jl](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl), a graph neural network library written in Julia and based on the deep learning framework [Flux.jl](https://github.com/FluxML/Flux.jl). GraphNeuralNetworks.jl is largely inspired by [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/), [Deep Graph Library](https://docs.dgl.ai/), and [GeometricFlux.jl](https://fluxml.ai/GeometricFlux.jl/stable/). -Among its features: +- `GraphNeuralNetwork.jl`: Package that contains stateful graph convolutional layers based on the machine learning framework [Flux.jl](https://fluxml.ai/Flux.jl/stable/). This is fronted package for Flux users. It depends on GNNlib.jl, GNNGraphs.jl, and Flux.jl packages. * Implements common graph convolutional layers. * Supports computations on batched graphs. @@ -14,74 +14,7 @@ Among its features: * [Examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/examples) of node, edge, and graph level machine learning tasks. -## Package overview -Let's give a brief overview of the package by solving a -graph regression problem with synthetic data. Usage examples on real datasets can be found in the [examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/examples) folder. -### Data preparation - -We create a dataset consisting in multiple random graphs and associated data features. - -```julia -using GraphNeuralNetworks, Graphs, Flux, CUDA, Statistics, MLUtils -using Flux: DataLoader - -all_graphs = GNNGraph[] - -for _ in 1:1000 - g = rand_graph(10, 40, - ndata=(; x = randn(Float32, 16,10)), # input node features - gdata=(; y = randn(Float32))) # regression target - push!(all_graphs, g) -end -``` - -### Model building - -We concisely define our model as a [`GNNChain`](@ref) containing two graph convolutional layers. If CUDA is available, our model will live on the gpu. - -```julia -device = CUDA.functional() ? Flux.gpu : Flux.cpu; - -model = GNNChain(GCNConv(16 => 64), - BatchNorm(64), # Apply batch normalization on node features (nodes dimension is batch dimension) - x -> relu.(x), - GCNConv(64 => 64, relu), - GlobalPool(mean), # aggregate node-wise features into graph-wise features - Dense(64, 1)) |> device - -opt = Flux.setup(Adam(1f-4), model) -``` - -### Training - -Finally, we use a standard Flux training pipeline to fit our dataset. -We use Flux's `DataLoader` to iterate over mini-batches of graphs -that are glued together into a single `GNNGraph` using the [`Flux.batch`](@ref) method. 
This is what happens under the hood when creating a `DataLoader` with the -`collate=true` option. - -```julia -train_graphs, test_graphs = MLUtils.splitobs(all_graphs, at=0.8) - -train_loader = DataLoader(train_graphs, - batchsize=32, shuffle=true, collate=true) -test_loader = DataLoader(test_graphs, - batchsize=32, shuffle=false, collate=true) - -loss(model, g::GNNGraph) = mean((vec(model(g, g.x)) - g.y).^2) - -loss(model, loader) = mean(loss(model, g |> device) for g in loader) - -for epoch in 1:100 - for g in train_loader - g = g |> device - grad = gradient(model -> loss(model, g), model) - Flux.update!(opt, model, grad[1]) - end - - @info (; epoch, train_loss=loss(model, train_loader), test_loss=loss(model, test_loader)) -end -``` diff --git a/GraphNeuralNetworks/docs/src/models.md b/GraphNeuralNetworks/docs/src/models.md index e4c65e9e5..4a7876390 100644 --- a/GraphNeuralNetworks/docs/src/models.md +++ b/GraphNeuralNetworks/docs/src/models.md @@ -6,7 +6,7 @@ their models. In what follows, we discuss two different styles for model creation: the *explicit modeling* style, more verbose but more flexible, -and the *implicit modeling* style based on [`GNNChain`](@ref), more concise but less flexible. +and the *implicit modeling* style based on [`GraphNeuralNetworks.GNNChain`](@ref), more concise but less flexible. ## Explicit modeling @@ -62,11 +62,11 @@ grad = gradient(model -> sum(model(g, X)), model) ## Implicit modeling with GNNChains While very flexible, the way in which we defined `GNN` model definition in last section is a bit verbose. -In order to simplify things, we provide the [`GNNChain`](@ref) type. It is very similar +In order to simplify things, we provide the [`GraphNeuralNetworks.GNNChain`](@ref) type. It is very similar to Flux's well known `Chain`. It allows to compose layers in a sequential fashion as Chain does, propagating the output of each layer to the next one. In addition, `GNNChain` handles propagates the input graph as well, providing it as a first argument -to layers subtyping the [`GNNLayer`](@ref) abstract type. +to layers subtyping the [`GraphNeuralNetworks.GNNLayer`](@ref) abstract type. 
Using `GNNChain`, the previous example becomes diff --git a/docs/Project.toml b/docs/Project.toml new file mode 100644 index 000000000..8d7c1e6a6 --- /dev/null +++ b/docs/Project.toml @@ -0,0 +1,4 @@ +[deps] +LiveServer = "16fef848-5104-11e9-1b77-fb7a48bbb589" +MultiDocumenter = "87ed4bf0-c935-4a67-83c3-2a03bee4197c" + diff --git a/docs/logo.svg b/docs/logo.svg new file mode 100644 index 000000000..cac604fcd --- /dev/null +++ b/docs/logo.svg @@ -0,0 +1,31 @@ + + \ No newline at end of file diff --git a/docs/make-multi.jl b/docs/make-multi.jl new file mode 100644 index 000000000..28e8f1866 --- /dev/null +++ b/docs/make-multi.jl @@ -0,0 +1,106 @@ +using MultiDocumenter + +for (root, dirs, files) in walkdir(".") + for file in files + filepath = joinpath(root, file) + if islink(filepath) + linktarget = abspath(dirname(filepath), readlink(filepath)) + rm(filepath) + cp(linktarget, filepath; force=true) + end + end +end + +docs = [ + MultiDocumenter.MultiDocRef( + upstream = joinpath(dirname(@__DIR__),"GraphNeuralNetworks", "docs", "build"), + path = "graphneuralnetworks", + name = "GraphNeuralNetworks", + fix_canonical_url = false), + MultiDocumenter.MultiDocRef( + upstream = joinpath(dirname(@__DIR__), "GNNGraphs", "docs", "build"), + path = "gnngraphs", + name = "GNNGraphs", + fix_canonical_url = false), + MultiDocumenter.MultiDocRef( + upstream = joinpath(dirname(@__DIR__), "GNNlib", "docs", "build"), + path = "gnnlib", + name = "GNNlib", + fix_canonical_url = false), + MultiDocumenter.MultiDocRef( + upstream = joinpath(dirname(@__DIR__), "GNNLux", "docs", "build"), + path = "gnnlux", + name = "GNNLux", + fix_canonical_url = false), + MultiDocumenter.MultiDocRef( + upstream = joinpath(dirname(@__DIR__), "tutorials", "docs", "build"), + path = "tutorials", + name = "tutorials", + fix_canonical_url = false), +] + +outpath = joinpath(@__DIR__, "build") + +MultiDocumenter.make( + outpath, + docs; + search_engine = MultiDocumenter.SearchConfig( + index_versions = ["stable"], + engine = MultiDocumenter.FlexSearch + ), + brand_image = MultiDocumenter.BrandImage("", "logo.svg"), + rootpath = "/GraphNeuralNetworks.jl/" +) + +cp(joinpath(@__DIR__, "logo.svg"), + joinpath(outpath, "logo.svg")) + +@warn "Deploying to GitHub as MultiDocumenter" +gitroot = normpath(joinpath(@__DIR__, "..")) +run(`git pull`) + +outbranch = "dep-multidocs" +has_outbranch = true + +status_output = read(`git status --porcelain docs/Project.toml`, String) +if !isempty(status_output) + @info "Restoring docs/Project.toml due to changes." + run(`git restore docs/Project.toml`) +else + @info "No changes detected in docs/Project.toml." +end + +if !success(`git checkout -f $outbranch`) + has_outbranch = false + if !success(`git switch --orphan $outbranch`) + @error "Cannot create new orphaned branch $outbranch." + exit(1) + end +end + +@info "Cleaning up $gitroot." +for file in readdir(gitroot; join = true) + file == "/home/runner/work/GraphNeuralNetworks.jl/GraphNeuralNetworks.jl/docs" && continue + endswith(file, ".git") && continue + rm(file; force = true, recursive = true) +end + +@info "Copying aggregated documentation to $gitroot." +for file in readdir(outpath) + cp(joinpath(outpath, file), joinpath(gitroot, file)) +end + +rm("/home/runner/work/GraphNeuralNetworks.jl/GraphNeuralNetworks.jl/docs"; force = true, recursive = true) + +run(`git add .`) +if success(`git commit -m 'Aggregate documentation'`) + @info "Pushing updated documentation." 
+ if has_outbranch + run(`git push`) + else + run(`git push -u origin $outbranch`) + end + run(`git checkout master`) +else + @info "No changes to aggregated documentation." +end diff --git a/tutorials/docs/Project.toml b/tutorials/docs/Project.toml new file mode 100644 index 000000000..8e1472137 --- /dev/null +++ b/tutorials/docs/Project.toml @@ -0,0 +1,4 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +Pluto = "c3e4b0f8-55cb-11ea-2926-15256bba5781" +PlutoStaticHTML = "359b1769-a58e-495b-9770-312e911026ad" diff --git a/tutorials/docs/make.jl b/tutorials/docs/make.jl new file mode 100644 index 000000000..db271bf89 --- /dev/null +++ b/tutorials/docs/make.jl @@ -0,0 +1,36 @@ +using Documenter + + +assets = [] +prettyurls = get(ENV, "CI", nothing) == "true" +mathengine = MathJax3() + +# interlinks = InterLinks( +# "NNlib" => "https://fluxml.ai/NNlib.jl/stable/", +# "GNNGraphs" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNGraphs/", joinpath(dirname(dirname(@__DIR__)), "GNNGraphs", "docs", "build", "objects.inv")), +# "GraphNeuralNetworks" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GraphNeuralNetworks/", joinpath(dirname(dirname(@__DIR__)), "docs", "build", "objects.inv")),) + +makedocs(; + doctest = false, + clean = true, + format = Documenter.HTML(; + mathengine, prettyurls, assets = assets, size_threshold = nothing), + sitename = "Tutorials", + pages = ["Home" => "index.md", + "Introductory tutorials" => [ + "Hands on" => "pluto_output/gnn_intro_pluto.md", + "Node classification" => "pluto_output/node_classification_pluto.md", + "Graph classification" => "pluto_output/graph_classification_pluto.md" + ], + "Temporal graph neural networks" =>[ + "Node autoregression" => "pluto_output/traffic_prediction.md", + "Temporal graph classification" => "pluto_output/temporal_graph_classification_pluto.md" + + ]]) + + + +deploydocs(;repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", +devbranch = "master", +push_preview = true, +dirname = "tutorials") \ No newline at end of file diff --git a/tutorials/docs/src/index.md b/tutorials/docs/src/index.md new file mode 100644 index 000000000..af1b57997 --- /dev/null +++ b/tutorials/docs/src/index.md @@ -0,0 +1,24 @@ +# Tutorials + +## Introductory tutorials + + +Here are some introductory tutorials to get you started: + +- [Hands-on introduction to Graph Neural Networks](pluto_output/gnn_intro_pluto.md) +- [Node classification with GraphNeuralNetworks.jl](pluto_output/node_classification_pluto.md) +- [Graph classification with GraphNeuralNetworks.jl](pluto_output/graph_classification_pluto.md) + + + +## Temporal graph neural networks tutorials + +Here some tutorials on temporal graph neural networks: + +- [Traffic Prediction using recurrent Temporal Graph Convolutional Network](pluto_output/traffic_prediction.md) + +- [Temporal Graph classification with GraphNeuralNetworks.jl](pluto_output/temporal_graph_classification_pluto.md) + +## Contributions + +If you have a suggestion on adding new tutorials, feel free to create a new issue [here](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/issues/new). Users are invited to contribute demonstrations of their own. If you want to contribute new tutorials and looking for inspiration, checkout these tutorials from [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/notes/colabs.html). Please check out existing tutorials for more details. 
\ No newline at end of file diff --git a/GraphNeuralNetworks/docs/pluto_output/gnn_intro_pluto.md b/tutorials/docs/src/pluto_output/gnn_intro_pluto.md similarity index 70% rename from GraphNeuralNetworks/docs/pluto_output/gnn_intro_pluto.md rename to tutorials/docs/src/pluto_output/gnn_intro_pluto.md index 6bf0d73ff..1174628d6 100644 --- a/GraphNeuralNetworks/docs/pluto_output/gnn_intro_pluto.md +++ b/tutorials/docs/src/pluto_output/gnn_intro_pluto.md @@ -25,8 +25,8 @@
This Pluto notebook is a Julia adaptation of the Pytorch Geometric tutorials that can be found here.
Recently, deep learning on graphs has emerged to one of the hottest research fields in the deep learning community. Here, Graph Neural Networks (GNNs) aim to generalize classical deep learning concepts to irregular structured data (in contrast to images or texts) and to enable neural networks to reason about objects and their relations.
This is done by following a simple neural message passing scheme, where node features \(\mathbf{x}_i^{(\ell)}\) of all nodes \(i \in \mathcal{V}\) in a graph \(\mathcal{G} = (\mathcal{V}, \mathcal{E})\) are iteratively updated by aggregating localized information from their neighbors \(\mathcal{N}(i)\):
$$\mathbf{x}_i^{(\ell + 1)} = f^{(\ell + 1)}_{\theta} \left( \mathbf{x}_i^{(\ell)}, \left\{ \mathbf{x}_j^{(\ell)} : j \in \mathcal{N}(i) \right\} \right)$$
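A concrete minimal instance of this update rule, using sum aggregation and the identity message function (a sketch with made-up sizes, not part of the notebook):

```julia
using GraphNeuralNetworks

g = rand_graph(5, 10)                  # 5 nodes, 10 edges (made-up example)
x = rand(Float32, 4, 5)                # a 4-dimensional feature per node

# m̄ᵢ = Σ_{j ∈ N(i)} xⱼ : copy each neighbor feature as the message, then sum.
m = propagate(copy_xj, g, +; xj = x)   # 4×5 matrix of aggregated neighbor features
```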
This tutorial will introduce you to some fundamental concepts regarding deep learning on graphs via Graph Neural Networks based on the GraphNeuralNetworks.jl library. GraphNeuralNetworks.jl is an extension library to the popular deep learning framework Flux.jl, and consists of various methods and utilities to ease the implementation of Graph Neural Networks.
Let's first import the packages we need:
GCN((conv1 = GCNConv(34 => 4), conv2 = GCNConv(4 => 4), conv3 = GCNConv(4 => 2), classifier = Dense(2 => 4)))+
GCN((conv1 = GCNConv(34 => 4), conv2 = GCNConv(4 => 4), conv3 = GCNConv(4 => 2), classifier = Dense(2 => 4))) # 182 parameters
_, h = gcn(g, g.ndata.x)
-(Float32[0.017824104 0.0077741514 … -0.049516954 -0.047012385; -0.008411304 0.00414012 … 0.0788404 0.07529551; -0.0069731097 0.0012623081 … 0.049945038 0.047662895; 0.0035474515 0.0027243823 … -0.001492914 -0.0013506437], Float32[-0.019373894 -0.0224004 … -0.04527937 -0.043780304; -0.027381245 -0.016037654 … 0.04697653 0.04436821])+
(Float32[-0.0068139993 0.008728906 … 0.020461287 0.016271798; -0.0019973165 -0.0064561698 … -0.0044912496 -0.004174295; 0.1469301 0.13193016 … -0.06870474 -0.03323521; -0.022454038 -0.0069215773 … 0.025904683 0.018215057], Float32[-0.055850513 -0.03927876 … 0.03876325 0.023417776; -0.11278143 -0.11275233 … 0.03937418 0.014116553])
function visualize_embeddings(h; colors = nothing)
xs = h[1, :] |> vec
@@ -208,7 +208,7 @@ end
visualize_embeddings (generic function with 1 method)
visualize_embeddings(h, colors = labels)
-[scatter plot of the initial node embeddings, colored by community (embedded image omitted)]
+[scatter plot of the initial node embeddings, colored by community (embedded image omitted)]
Remarkably, even before training the weights of our model, the model produces an embedding of nodes that closely resembles the community-structure of the graph. Nodes of the same color (community) are already closely clustered together in the embedding space, although the weights of our model are initialized completely at random and we have not yet performed any training so far! This leads to the conclusion that GNNs introduce a strong inductive bias, leading to similar embeddings for nodes that are close to each other in the input graph.
But can we do better? Let's look at an example on how to train our network parameters based on the knowledge of the community assignments of 4 nodes in the graph (one for each community).
Since everything in our model is differentiable and parameterized, we can add some labels, train the model and observe how the embeddings react. Here, we make use of a semi-supervised or transductive learning procedure: we simply train against one node per class, but are allowed to make use of the complete input graph data.
Training our model is very similar to any other Flux model. In addition to defining our network architecture, we define a loss criterion (here, `logitcrossentropy`), and initialize a stochastic gradient optimizer (here, `Adam`). After that, we perform multiple rounds of optimization, where each round consists of a forward and backward pass to compute the gradients of our model parameters w.r.t. the loss derived from the forward pass. If you are not new to Flux, this scheme should appear familiar to you.
Note that our semi-supervised learning scenario is achieved by the following line:
loss = logitcrossentropy(ŷ[:,train_mask], y[:,train_mask])
While we compute node embeddings for all of our nodes, we only make use of the training nodes for computing the loss. Here, this is implemented by filtering the output of the classifier `out` and the ground-truth labels `data.y` to only contain the nodes in the `train_mask`.
Let us now start training and see how our node embeddings evolve over time (best experienced by explicitly running the code):
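The training cell itself is unchanged and therefore collapsed in this diff; for orientation, a minimal sketch of the loop the notebook runs (assuming the `model`, `g`, `y`, and `train_mask` objects defined earlier in the notebook):

```julia
using Flux
using Flux: logitcrossentropy

opt = Flux.setup(Adam(1e-2), model)   # learning rate is a made-up example value

for epoch in 1:200
    grad = Flux.gradient(model) do m
        ŷ, _ = m(g, g.ndata.x)        # the model returns (logits, embeddings)
        logitcrossentropy(ŷ[:, train_mask], y[:, train_mask])
    end
    Flux.update!(opt, model, grad[1])
end
```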
ŷ, emb_final = model(g, g.ndata.x)
-(Float32[7.2331567 7.2313447 … 9.202145 9.188894; 12.7212515 12.735689 … -2.4455047 -2.3414903; -4.593668 -4.6052985 … 7.653345 7.5694675; -8.756303 -8.760008 … -4.8976927 -4.924286], Float32[-0.99999434 -1.0 … -0.9999765 -0.9999995; -0.9980977 -0.9999941 … 0.9964582 0.98278886])+
(Float32[-8.871021 -6.288402 … 7.8817716 7.3984337; 7.873129 5.5748186 … -8.054153 -7.562167; 0.6939411 2.6538918 … 0.1978332 0.633129; 0.42380208 -1.7143326 … -0.14687762 -0.5542332], Float32[-0.99049056 -0.9905237 … 0.99305063 0.87260294; -0.9905631 -0.40585023 … 0.9999852 0.99999404])
# train accuracy
mean(onecold(ŷ[:, train_mask]) .== onecold(y[:, train_mask]))
@@ -248,10 +248,10 @@ mean(onecold(ŷ[:, train_mask]) .== onecold(y[:, train_mask]))
# test accuracy
mean(onecold(ŷ[:, .!train_mask]) .== onecold(y[:, .!train_mask]))
-0.9
+0.8
visualize_embeddings(emb_final, colors = labels)
-[scatter plot of the final node embeddings after training, colored by community (embedded image omitted)]
+[scatter plot of the final node embeddings after training, colored by community (embedded image omitted)]
As one can see, our 3-layer GCN model manages to linearly separate the communities and classify most of the nodes correctly.
Furthermore, we did this all with a few lines of code, thanks to GraphNeuralNetworks.jl, which helped us out with data handling and the GNN implementations.
begin
using Flux
@@ -43,7 +43,7 @@ end;
-This Pluto notebook is a julia adaptation of the Pytorch Geometric tutorials that can be found here.
In this tutorial session we will have a closer look at how to apply Graph Neural Networks (GNNs) to the task of graph classification. Graph classification refers to the problem of classifying entire graphs (in contrast to nodes), given a dataset of graphs, based on some structural graph properties. Here, we want to embed entire graphs, and we want to embed those graphs in such a way so that they are linearly separable given a task at hand.
The most common task for graph classification is molecular property prediction, in which molecules are represented as graphs, and the task may be to infer whether a molecule inhibits HIV virus replication or not.
The TU Dortmund University has collected a wide range of different graph classification datasets, known as the TUDatasets, which are also accessible via MLDatasets.jl. Let's load and inspect one of the smaller ones, the MUTAG dataset:
This Pluto notebook is a julia adaptation of the Pytorch Geometric tutorials that can be found here.
In this tutorial session we will have a closer look at how to apply Graph Neural Networks (GNNs) to the task of graph classification. Graph classification refers to the problem of classifying entire graphs (in contrast to nodes), given a dataset of graphs, based on some structural graph properties. Here, we want to embed entire graphs, and we want to embed those graphs in such a way so that they are linearly separable given a task at hand.
The most common task for graph classification is molecular property prediction, in which molecules are represented as graphs, and the task may be to infer whether a molecule inhibits HIV virus replication or not.
The TU Dortmund University has collected a wide range of different graph classification datasets, known as the TUDatasets, which are also accessible via MLDatasets.jl. Let's load and inspect one of the smaller ones, the MUTAG dataset:
dataset = TUDataset("MUTAG")
dataset TUDataset: @@ -102,7 +102,7 @@ end
We have some useful utilities for working with graph datasets, e.g., we can shuffle the dataset and use the first 150 graphs as training graphs, while using the remaining ones for testing:
train_data, test_data = splitobs((graphs, y), at = 150, shuffle = true) |> getobs
-((GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(16, 34) with x: 7×16 data, GNNGraph(22, 50) with x: 7×22 data, GNNGraph(23, 54) with x: 7×23 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(13, 28) with x: 7×13 data, GNNGraph(19, 44) with x: 7×19 data, GNNGraph(16, 34) with x: 7×16 data, GNNGraph(14, 30) with x: 7×14 data, GNNGraph(18, 38) with x: 7×18 data … GNNGraph(12, 26) with x: 7×12 data, GNNGraph(19, 40) with x: 7×19 data, GNNGraph(19, 44) with x: 7×19 data, GNNGraph(26, 60) with x: 7×26 data, GNNGraph(20, 44) with x: 7×20 data, GNNGraph(20, 44) with x: 7×20 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(19, 44) with x: 7×19 data, GNNGraph(19, 42) with x: 7×19 data, GNNGraph(22, 50) with x: 7×22 data], Bool[0 0 … 0 0; 1 1 … 1 1]), (GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(26, 60) with x: 7×26 data, GNNGraph(15, 34) with x: 7×15 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(24, 50) with x: 7×24 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(21, 44) with x: 7×21 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(13, 28) with x: 7×13 data, GNNGraph(12, 26) with x: 7×12 data, GNNGraph(17, 38) with x: 7×17 data … GNNGraph(12, 26) with x: 7×12 data, GNNGraph(23, 52) with x: 7×23 data, GNNGraph(12, 24) with x: 7×12 data, GNNGraph(23, 50) with x: 7×23 data, GNNGraph(13, 28) with x: 7×13 data, GNNGraph(18, 40) with x: 7×18 data, GNNGraph(16, 36) with x: 7×16 data, GNNGraph(13, 26) with x: 7×13 data, GNNGraph(28, 62) with x: 7×28 data, GNNGraph(11, 22) with x: 7×11 data], Bool[0 0 … 0 1; 1 1 … 1 0]))+
((GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(12, 26) with x: 7×12 data, GNNGraph(23, 52) with x: 7×23 data, GNNGraph(12, 26) with x: 7×12 data, GNNGraph(16, 34) with x: 7×16 data, GNNGraph(15, 32) with x: 7×15 data, GNNGraph(13, 28) with x: 7×13 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(23, 54) with x: 7×23 data, GNNGraph(15, 34) with x: 7×15 data, GNNGraph(22, 50) with x: 7×22 data … GNNGraph(16, 34) with x: 7×16 data, GNNGraph(19, 44) with x: 7×19 data, GNNGraph(26, 60) with x: 7×26 data, GNNGraph(20, 44) with x: 7×20 data, GNNGraph(16, 36) with x: 7×16 data, GNNGraph(15, 34) with x: 7×15 data, GNNGraph(23, 54) with x: 7×23 data, GNNGraph(22, 50) with x: 7×22 data, GNNGraph(23, 54) with x: 7×23 data, GNNGraph(13, 26) with x: 7×13 data], Bool[0 0 … 0 1; 1 1 … 1 0]), (GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(13, 28) with x: 7×13 data, GNNGraph(14, 28) with x: 7×14 data, GNNGraph(19, 44) with x: 7×19 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(16, 34) with x: 7×16 data, GNNGraph(19, 44) with x: 7×19 data, GNNGraph(10, 20) with x: 7×10 data, GNNGraph(20, 44) with x: 7×20 data, GNNGraph(25, 56) with x: 7×25 data, GNNGraph(20, 46) with x: 7×20 data … GNNGraph(12, 26) with x: 7×12 data, GNNGraph(21, 44) with x: 7×21 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(22, 50) with x: 7×22 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(22, 50) with x: 7×22 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(24, 50) with x: 7×24 data, GNNGraph(12, 26) with x: 7×12 data, GNNGraph(19, 44) with x: 7×19 data], Bool[0 1 … 1 0; 1 0 … 0 1]))
begin
train_loader = DataLoader(train_data, batchsize = 32, shuffle = true)
@@ -123,15 +123,15 @@ end
Since graphs in graph classification datasets are usually small, a good idea is to batch the graphs before inputting them into a Graph Neural Network to guarantee full GPU utilization. In the image or language domain, this procedure is typically achieved by rescaling or padding each example into a set of equally-sized shapes, and examples are then grouped in an additional dimension. The length of this dimension is then equal to the number of examples grouped in a mini-batch and is typically referred to as the batchsize.
However, for GNNs the two approaches described above are either not feasible or may result in a lot of unnecessary memory consumption. Therefore, GraphNeuralNetworks.jl opts for another approach to achieve parallelization across a number of examples. Here, adjacency matrices are stacked in a diagonal fashion (creating a giant graph that holds multiple isolated subgraphs), and node and target features are simply concatenated in the node dimension (the last dimension).
This procedure has some crucial advantages over other batching procedures:
GNN operators that rely on a message passing scheme do not need to be modified since messages are not exchanged between two nodes that belong to different graphs.
There is no computational or memory overhead since adjacency matrices are saved in a sparse fashion holding only non-zero entries, i.e., the edges.
GraphNeuralNetworks.jl can batch multiple graphs into a single giant graph:
vec_gs, _ = first(train_loader)
(GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(13, 28) with x: 7×13 data, GNNGraph(15, 34) with x: 7×15 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(23, 54) with x: 7×23 data, GNNGraph(14, 30) with x: 7×14 data, GNNGraph(16, 34) with x: 7×16 data, GNNGraph(17, 38) with x: 7×17 data, GNNGraph(13, 28) with x: 7×13 data, GNNGraph(19, 40) with x: 7×19 data … GNNGraph(26, 56) with x: 7×26 data, GNNGraph(13, 28) with x: 7×13 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(18, 38) with x: 7×18 data, GNNGraph(28, 66) with x: 7×28 data, GNNGraph(11, 22) with x: 7×11 data, GNNGraph(13, 28) with x: 7×13 data, GNNGraph(18, 40) with x: 7×18 data, GNNGraph(16, 36) with x: 7×16 data, GNNGraph(22, 50) with x: 7×22 data], Bool[1 0 … 1 0; 0 1 … 0 1])
MLUtils.batch(vec_gs)
GNNGraph:
  num_nodes: 569
  num_edges: 1258
  num_graphs: 32
  ndata:
    x = 7×569 Matrix{Float32}
Each batched graph object is equipped with a graph_indicator vector, which maps each node to its respective graph in the batch:
$$\textrm{graph\_indicator} = [1, \ldots, 1, 2, \ldots, 2, 3, \ldots ]$$
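To make this concrete, here is a minimal sketch (using tiny random graphs, not the MUTAG data) of how batching produces the graph_indicator vector:

```julia
using GraphNeuralNetworks, MLUtils

g1 = rand_graph(3, 4)             # 3 nodes, 4 (directed) edges
g2 = rand_graph(2, 2)             # 2 nodes, 2 edges
gbatch = MLUtils.batch([g1, g2])  # one GNNGraph with 5 nodes and 6 edges
gbatch.graph_indicator            # 5-element vector: [1, 1, 1, 2, 2]
```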
In this tutorial, we will be learning how to use Graph Neural Networks (GNNs) for node classification: given the ground-truth labels of only a small subset of nodes, we want to infer the labels of all the remaining nodes (transductive learning).
After training the model, we can call the accuracy function to see how well our model performs on unseen labels. Here, we are interested in the accuracy of the model, i.e., the ratio of correctly classified nodes:
accuracy(mlp, g.ndata.features, y, .!train_mask)
0.45872274143302183
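The accuracy helper itself is not shown in this excerpt; a minimal sketch consistent with the call above (an assumed definition, using onecold from Flux) could look like this:

```julia
using Flux: onecold
using Statistics: mean

# Hypothetical helper: fraction of correctly classified nodes among those selected by `mask`.
function accuracy(model, x, y, mask)
    ŷ = model(x)                       # class scores, num_classes × num_nodes
    mean(onecold(ŷ[:, mask]) .== onecold(y[:, mask]))
end
```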
As one can see, our MLP performs rather poorly, with only about 46% test accuracy. But why doesn't the MLP perform better? The main reason is that this model suffers from heavy overfitting, since it only has access to a small number of training nodes, and therefore it generalizes poorly to unseen node representations.
It also fails to incorporate an important bias into the model: cited papers are very likely related to the category of a document. That is exactly where Graph Neural Networks come into play and can help boost the performance of our model.
We certainly can do better by training our model. The training and testing procedure is once again the same, but this time we make use of the node features x and the graph g as input to our GCN model.
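As a rough sketch (the layer sizes here are illustrative assumptions, not necessarily the notebook's exact definition), a GCN for this task differs from the MLP mainly in that the graph is passed to every layer:

```julia
using Flux, GraphNeuralNetworks

nfeat, nclass = 1433, 7                 # Cora-like sizes, assumed for illustration
gcn = GNNChain(GCNConv(nfeat => 16, relu),
               GCNConv(16 => nclass))

# ŷ = gcn(g, x)   # unlike the MLP, the graph g is passed alongside the features x
```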
Train accuracy: 1.0
Test accuracy: 0.7706386292834891

out_trained = gcn(g, x) |> transpose
visualize_tsne(out_trained, g.ndata.targets)

diff --git a/tutorials/docs/src/pluto_output/temporal_graph_classification_pluto.md b/tutorials/docs/src/pluto_output/temporal_graph_classification_pluto.md
new file mode 100644
index 000000000..db5753f93
In this tutorial, we will learn how to extend the graph classification task to the case of temporal graphs, i.e., graphs whose topology and features are time-varying.
We will design and train a simple temporal graph neural network architecture to classify subjects' gender (female or male) using the temporal graphs extracted from their brain fMRI scan signals. Given the large amount of data, we will implement the training so that it can also run on the GPU.
We start by importing the necessary libraries. We use GraphNeuralNetworks.jl, Flux.jl, and MLDatasets.jl, among others.
begin
+ using Flux
+ using GraphNeuralNetworks
+ using Statistics, Random
+ using LinearAlgebra
+ using MLDatasets: TemporalBrains
+ using CUDA
+ using cuDNN
+end
+
+
+
+```
+## Dataset: TemporalBrains
+```@raw html
+The TemporalBrains dataset contains a collection of functional brain connectivity networks from 1000 subjects obtained from resting-state functional MRI data from the Human Connectome Project (HCP). Functional connectivity is defined as the temporal dependence of neuronal activation patterns of anatomically separated brain regions.
The graph nodes represent brain regions and their number is fixed at 102 for each of the 27 snapshots, while the edges, representing functional connectivity, change over time. For each snapshot, the feature of a node represents the average activation of the node during that snapshot. Each temporal graph has a label representing gender ('M' for male and 'F' for female) and age group (22-25, 26-30, 31-35, and 36+). The network's edge weights are binarized, and the threshold is set to 0.6 by default.
brain_dataset = TemporalBrains()
+dataset TemporalBrains:
+  graphs  =>    1000-element Vector{MLDatasets.TemporalSnapshotsGraph}
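Before converting, we can peek at a single subject; this quick sketch relies only on the fields that the data_loader below uses (snapshots and graph_data.g):

```julia
graph1 = brain_dataset.graphs[1]
length(graph1.snapshots)   # 27 snapshots for this subject
graph1.graph_data.g        # gender label, "F" or "M"
```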
After loading the dataset from the MLDatasets.jl package, we see that there are 1000 graphs, which we need to convert to the TemporalSnapshotsGNNGraph format. To do so, we create a function called data_loader that performs this conversion and splits the dataset into a training set, used to train the model, and a test set, used to evaluate its performance.
function data_loader(brain_dataset)
+ graphs = brain_dataset.graphs
+ dataset = Vector{TemporalSnapshotsGNNGraph}(undef, length(graphs))
+ for i in 1:length(graphs)
+ graph = graphs[i]
+ dataset[i] = TemporalSnapshotsGNNGraph(GraphNeuralNetworks.mlgraph2gnngraph.(graph.snapshots))
+ # Add graph and node features
+ for t in 1:27
+ s = dataset[i].snapshots[t]
+ s.ndata.x = [I(102); s.ndata.x']
+ end
+ dataset[i].tgdata.g = Float32.(Flux.onehot(graph.graph_data.g, ["F", "M"]))
+ end
+    # Split the dataset: the first 200 graphs are used for training and the next 50 for testing
+ train_loader = dataset[1:200]
+ test_loader = dataset[201:250]
+ return train_loader, test_loader
+end;
+
+
+
The first part of the data_loader function calls mlgraph2gnngraph on each snapshot, which converts it to a GNNGraph; the resulting vector of GNNGraphs is then collected into a TemporalSnapshotsGNNGraph.
The second part adds the graph and node features to the temporal graphs. For the node features, it stacks a one-hot encoding of the node identity (here simply the identity matrix) on top of the node's mean activation in that snapshot (contained in the vector dataset[i].snapshots[t].ndata.x, where i indexes the subject and t the snapshot). For the graph feature, it adds the one-hot encoding of the gender label.
The last part splits the dataset.
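Concretely, each snapshot's node feature matrix ends up with 103 rows (one-hot node identity plus one row of mean activations) and 102 columns, matching nfeatures = 103 in the model below. A small shape check, assuming s.ndata.x starts out as a 102-element vector of mean activations:

```julia
using LinearAlgebra

x_mean = rand(Float32, 102)   # stand-in for the mean activation of each node
x_node = [I(102); x_mean']    # 102×102 one-hot identity stacked over a 1×102 row
size(x_node)                  # (103, 102): 103 features per node, 102 nodes
```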
We now implement a simple model that takes a TemporalSnapshotsGNNGraph as input. It consists of a GINConv applied independently to each snapshot, a GlobalPool to get an embedding for each snapshot, a pooling over the time dimension to get an embedding for the whole temporal graph, and finally a Dense layer.
First, we adapt the GlobalPool layer to TemporalSnapshotsGNNGraphs.
function (l::GlobalPool)(g::TemporalSnapshotsGNNGraph, x::AbstractVector)
+ h = [reduce_nodes(l.aggr, g[i], x[i]) for i in 1:(g.num_snapshots)]
+ sze = size(h[1])
+ reshape(reduce(hcat, h), sze[1], length(h))
+end
+
+
+
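As a quick sanity check of this extension (with small random graphs and an illustrative feature size):

```julia
using GraphNeuralNetworks, Statistics

snaps = [rand_graph(10, 20) for _ in 1:5]    # 5 snapshots with 10 nodes each
tg = TemporalSnapshotsGNNGraph(snaps)
x = [rand(Float32, 128, 10) for _ in 1:5]    # one feature matrix per snapshot
pool = GlobalPool(mean)
size(pool(tg, x))                            # (128, 5): one embedding per snapshot
```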
+Then we implement the constructor of the model, which we call GenderPredictionModel, and its forward pass.
begin
+ struct GenderPredictionModel
+ gin::GINConv
+ mlp::Chain
+ globalpool::GlobalPool
+ f::Function
+ dense::Dense
+ end
+
+ Flux.@layer GenderPredictionModel
+
+ function GenderPredictionModel(; nfeatures = 103, nhidden = 128, activation = relu)
+ mlp = Chain(Dense(nfeatures, nhidden, activation), Dense(nhidden, nhidden, activation))
+ gin = GINConv(mlp, 0.5)
+ globalpool = GlobalPool(mean)
+ f = x -> mean(x, dims = 2)
+ dense = Dense(nhidden, 2)
+ GenderPredictionModel(gin, mlp, globalpool, f, dense)
+ end
+
+ function (m::GenderPredictionModel)(g::TemporalSnapshotsGNNGraph)
+ h = m.gin(g, g.ndata.x)
+ h = m.globalpool(g, h)
+ h = m.f(h)
+ m.dense(h)
+ end
+
+end
+
+
+
+```
+## Training
+```@raw html
+We train the model for 100 epochs, using the Adam optimizer with a learning rate of 0.001. We use logitbinarycrossentropy as the loss function, which is commonly used for two-class classification where the labels are given in one-hot format. The accuracy expresses the percentage of correct classifications.
lossfunction(ŷ, y) = Flux.logitbinarycrossentropy(ŷ, y);
+
+
+function eval_loss_accuracy(model, data_loader)
+ error = mean([lossfunction(model(g), g.tgdata.g) for g in data_loader])
+ acc = mean([round(100 * mean(Flux.onecold(model(g)) .== Flux.onecold(g.tgdata.g)); digits = 2) for g in data_loader])
+ return (loss = error, acc = acc)
+end;
+
+
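To make the loss and accuracy computation concrete, here is a tiny worked example on a single logit/label pair (the values are made up):

```julia
using Flux

ŷ = Float32[2.0, -1.0]                # model logits for one subject (2 classes)
y = Float32[1.0, 0.0]                 # one-hot gender label, here "F"
Flux.logitbinarycrossentropy(ŷ, y)    # elementwise binary cross-entropy on the logits, averaged
Flux.onecold(ŷ) == Flux.onecold(y)    # true, so this prediction counts as correct
```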
+function train(dataset; usecuda::Bool, kws...)
+
+ if usecuda && CUDA.functional() #check if GPU is available
+ my_device = gpu
+ @info "Training on GPU"
+ else
+ my_device = cpu
+ @info "Training on CPU"
+ end
+
+ function report(epoch)
+ train_loss, train_acc = eval_loss_accuracy(model, train_loader)
+ test_loss, test_acc = eval_loss_accuracy(model, test_loader)
+ println("Epoch: $epoch $((; train_loss, train_acc)) $((; test_loss, test_acc))")
+ return (train_loss, train_acc, test_loss, test_acc)
+ end
+
+ model = GenderPredictionModel() |> my_device
+
+ opt = Flux.setup(Adam(1.0f-3), model)
+
+ train_loader, test_loader = data_loader(dataset)
+ train_loader = train_loader |> my_device
+ test_loader = test_loader |> my_device
+
+ report(0)
+ for epoch in 1:100
+ for g in train_loader
+ grads = Flux.gradient(model) do model
+ ŷ = model(g)
+ lossfunction(vec(ŷ), g.tgdata.g)
+ end
+ Flux.update!(opt, model, grads[1])
+ end
+ if epoch % 10 == 0
+ report(epoch)
+ end
+ end
+ return model
+end;
+
+
+
+train(brain_dataset; usecuda = true)
+GenderPredictionModel(GINConv(Chain(Dense(103 => 128, relu), Dense(128 => 128, relu)), 0.5), Chain(Dense(103 => 128, relu), Dense(128 => 128, relu)), GlobalPool{typeof(mean)}(Statistics.mean), var"#4#5"(), Dense(128 => 2))  # 30_082 parameters, plus 29_824 non-trainable
We set up the training to run on the GPU because training takes a long time, especially on the CPU.
In this tutorial, we implemented a very simple architecture to classify temporal graphs in the context of gender classification using brain data. We then trained the model on the GPU for 100 epochs on the TemporalBrains dataset. The accuracy of the model is approximately 75-80%, but can be improved by fine-tuning the parameters and training on more data.
In this tutorial, we will learn how to use a recurrent Temporal Graph Convolutional Network (TGCN) to predict traffic in a spatio-temporal setting. Traffic forecasting is the problem of predicting future traffic trends on a road network given historical traffic data, such as, in our case, traffic speed and time of day.
We start by importing the necessary libraries. We use GraphNeuralNetworks.jl, Flux.jl, and MLDatasets.jl, among others.
begin
+ using GraphNeuralNetworks
+ using Flux
+ using Flux.Losses: mae
+ using MLDatasets: METRLA
+ using Statistics
+ using Plots
+end
+
+
+
+```
+## Dataset: METR-LA
+```@raw html
+We use the METR-LA dataset from the paper Diffusion Convolutional Recurrent Neural Network: Data-driven Traffic Forecasting, which contains traffic data from loop detectors on the highways of Los Angeles County. The dataset contains traffic speed data from March 1, 2012 to June 30, 2012, collected every 5 minutes (12 observations per hour) from 207 sensors. Each sensor is a node in the graph, and the edges represent the distances between the sensors.
dataset_metrla = METRLA(; num_timesteps = 3)
+dataset METRLA:
+  graphs  =>    1-element Vector{MLDatasets.Graph}
g = dataset_metrla[1]
+Graph:
+  num_nodes   =>    207
+  num_edges   =>    1722
+  edge_index  =>    ("1722-element Vector{Int64}", "1722-element Vector{Int64}")
+  node_data   =>    (features = "34269-element Vector{Any}", targets = "34269-element Vector{Any}")
+  edge_data   =>    1722-element Vector{Float32}
edge_data contains the weights of the edges of the graph, and node_data contains a node feature vector and a target vector. Both are stored as batches of num_timesteps time steps, i.e., each entry holds the node features and targets for num_timesteps consecutive time steps, and two consecutive batches are shifted by one time step. The node features are the traffic speed of the sensors and the time of day, and the targets are the traffic speed of the sensors at the next time step. Let's look at some examples:
size(g.node_data.features[1])
+(2, 207, 3)
The first dimension corresponds to the two features (the first row is the speed value and the second the time of day), the second dimension to the nodes, and the third to the number of time steps num_timesteps.
size(g.node_data.targets[1])
+(1, 207, 3)
In the case of the targets the first dimension is 1 because they store just the speed value.
g.node_data.features[1][:,1,:]
+2×3 Matrix{Float32}:
+  1.17081    1.11647   1.15888
+ -0.876741  -0.87663  -0.87652
g.node_data.features[2][:,1,:]
+2×3 Matrix{Float32}:
+  1.11647   1.15888  -0.876741
+ -0.87663  -0.87652  -0.87641
g.node_data.targets[1][:,1,:]
+1×3 Matrix{Float32}:
+ 1.11647  1.15888  -0.876741
function plot_data(data,sensor)
+ p = plot(legend=false, xlabel="Time (h)", ylabel="Normalized speed")
+ plotdata = []
+ for i in 1:3:length(data)
+ push!(plotdata,data[i][1,sensor,:])
+ end
+ plotdata = reduce(vcat,plotdata)
+ plot!(p, collect(1:length(data)), plotdata, color = :green, xticks =([i for i in 0:50:250], ["$(i)" for i in 0:4:24]))
+ return p
+end
+plot_data (generic function with 1 method)
plot_data(g.node_data.features[1:288],1)
+
+
+
+Now let's construct the static graph, the temporal features and targets from the dataset.
begin
+ graph = GNNGraph(g.edge_index; edata = g.edge_data, g.num_nodes)
+ features = g.node_data.features
+ targets = g.node_data.targets
+end;
+
+
+
+Now let's construct the train_loader and test_loader.
begin
+ train_loader = zip(features[1:200], targets[1:200])
+ test_loader = zip(features[2001:2288], targets[2001:2288])
+end;
+
+
+
+```
+## Model: T-GCN
+```@raw html
+We use the T-GCN model from the paper T-GCN: A Temporal Graph Convolutional Network for Traffic Prediction, which consists of a graph convolutional network (GCN) and a gated recurrent unit (GRU). The GCN is used to capture spatial features from the graph, and the GRU is used to capture temporal features from the feature time series.
model = GNNChain(TGCN(2 => 100), Dense(100, 1))
+GNNChain(Recur(TGCNCell(2 => 100)), Dense(100 => 1))
We train the model for 100 epochs, using the Adam optimizer with a learning rate of 0.001. We use the mean absolute error (MAE) as the loss function.
function train(graph, train_loader, model)
+
+ opt = Flux.setup(Adam(0.001), model)
+
+ for epoch in 1:100
+ for (x, y) in train_loader
+ grads = Flux.gradient(model) do model
+ ŷ = model(graph, x)
+ Flux.mae(ŷ, y)
+ end
+ Flux.update!(opt, model, grads[1])
+ end
+
+ if epoch % 10 == 0
+ loss = mean([Flux.mae(model(graph,x), y) for (x, y) in train_loader])
+ @show epoch, loss
+ end
+ end
+ return model
+end
+train (generic function with 1 method)
train(graph, train_loader, model)
+GNNChain(Recur(TGCNCell(2 => 100)), Dense(100 => 1))
+function plot_predicted_data(graph, features, targets, sensor)
+    p = plot(xlabel = "Time (h)", ylabel = "Normalized speed")
+    prediction = []
+    ground_truth = []
+    # Take every third window so that non-overlapping time steps are plotted
+    for i in 1:3:length(features)
+        push!(ground_truth, targets[i][1, sensor, :])
+        push!(prediction, model(graph, features[i])[1, sensor, :])
+    end
+    prediction = reduce(vcat, prediction)
+    ground_truth = reduce(vcat, ground_truth)
+    plot!(p, collect(1:length(features)), ground_truth, color = :blue, label = "Ground Truth", xticks = ([i for i in 0:50:250], ["$(i)" for i in 0:4:24]))
+    plot!(p, collect(1:length(features)), prediction, color = :red, label = "Prediction")
+    return p
+end
+plot_predicted_data (generic function with 1 method)
plot_predicted_data(graph,features[301:588],targets[301:588], 1)
+
+
+accuracy(ŷ, y) = 1 - Statistics.norm(y-ŷ)/Statistics.norm(y)
+accuracy (generic function with 1 method)
mean([accuracy(model(graph,x), y) for (x, y) in test_loader])
+0.47803628f0
The accuracy is not very good but can be improved by training using more data. We used a small subset of the dataset for this tutorial because of the computational cost of training the model. From the plot of the predictions, we can see that the model is able to capture the general trend of the traffic speed, but it is not able to capture the peaks of the traffic.
In this tutorial, we learned how to use a recurrent temporal graph convolutional network to predict traffic in a spatio-temporal setting. We used the TGCN model, which consists of a graph convolutional network (GCN) and a gated recurrent unit (GRU). We then trained the model for 100 epochs on a small subset of the METR-LA dataset. The accuracy of the model is not very good, but it can be improved by training on more data.