Share a common GNN features extractor between the ray.rllib and sb3 wrappers
The GNN features extractors used by the ray.rllib and sb3 wrappers are identical (except that the sb3 one derives from BaseFeaturesExtractor), and so is the function converting a GraphInstance into a torch_geometric.data.Data.

We factor this shared code out into hub/solver/utils/gnn.

We also remove unused code from the ray.rllib GNN wrapper.
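
To see how the now-shared pieces fit together, here is a minimal usage sketch. The import paths come from this commit; the toy observation space and the dimensions are illustrative assumptions, not part of the change:

```python
import gymnasium as gym

from skdecide.hub.solver.utils.gnn.torch_layers import GraphFeaturesExtractor
from skdecide.hub.solver.utils.gnn.torch_utils import graph_obs_to_thg_data

# Toy graph observation space: 3-dim node features, 1-dim edge features.
space = gym.spaces.Graph(
    node_space=gym.spaces.Box(low=-1.0, high=1.0, shape=(3,)),
    edge_space=gym.spaces.Box(low=0.0, high=1.0, shape=(1,)),
)
obs = space.sample(num_nodes=4, num_edges=6)  # a gym.spaces.GraphInstance

# Shared converter: GraphInstance -> torch_geometric.data.Data.
data = graph_obs_to_thg_data(obs)

# Shared extractor: both the ray.rllib and sb3 wrappers now delegate to it.
extractor = GraphFeaturesExtractor(observation_space=space, features_dim=64)
features = extractor(data)  # tensor of shape (1, 64) after global pooling
```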
nhuet committed Jan 20, 2025
1 parent 324ebf4 commit f08e09a
Showing 10 changed files with 50 additions and 251 deletions.
2 changes: 1 addition & 1 deletion skdecide/hub/solver/ray_rllib/gnn/models/torch/gnn.py
@@ -8,11 +8,11 @@
 from ray.rllib.utils.typing import ModelConfigDict
 from torch import nn
 
-from skdecide.hub.solver.ray_rllib.gnn.torch_layers import GraphFeaturesExtractor
 from skdecide.hub.solver.ray_rllib.gnn.utils.spaces.space_utils import (
     convert_dict_space_to_graph_space,
     is_graph_dict_space,
 )
+from skdecide.hub.solver.utils.gnn.torch_layers import GraphFeaturesExtractor
 
 
 class GnnBasedModel(TorchModelV2, nn.Module):
116 changes: 0 additions & 116 deletions skdecide/hub/solver/ray_rllib/gnn/policy/sample_batch.py

This file was deleted.

26 changes: 0 additions & 26 deletions skdecide/hub/solver/ray_rllib/gnn/policy/torch_mixins.py

This file was deleted.

25 changes: 1 addition & 24 deletions skdecide/hub/solver/ray_rllib/gnn/utils/torch_utils.py
@@ -15,30 +15,7 @@
     is_graph_dict_multiinput,
     is_masked_obs,
 )
-
-
-def graph_obs_to_thg_data(
-    obs: gym.spaces.GraphInstance,
-    device: Optional[th.device] = None,
-    pin_memory: bool = False,
-) -> thg.data.Data:
-    # Node features
-    flatten_node_features = obs.nodes.reshape((len(obs.nodes), -1))
-    x = th.tensor(flatten_node_features).float()
-    # Edge features
-    if obs.edges is None:
-        edge_attr = None
-    else:
-        flatten_edge_features = obs.edges.reshape((len(obs.edges), -1))
-        edge_attr = th.tensor(flatten_edge_features).float()
-    edge_index = th.tensor(obs.edge_links, dtype=th.long).t().contiguous().view(2, -1)
-    # thg.Data
-    data = thg.data.Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
-    # Pin the tensor's memory (for faster transfer to GPU later).
-    if pin_memory and th.cuda.is_available():
-        data.pin_memory()
-
-    return data if device is None else data.to(device)
+from skdecide.hub.solver.utils.gnn.torch_utils import graph_obs_to_thg_data
 
 
 def convert_to_torch_tensor(
83 changes: 16 additions & 67 deletions skdecide/hub/solver/stable_baselines/gnn/common/torch_layers.py
@@ -9,6 +9,8 @@
 from torch import nn
 from torch_geometric.nn import global_max_pool
 
+from skdecide.hub.solver.utils.gnn import torch_layers
+
 
 class GraphFeaturesExtractor(BaseFeaturesExtractor):
     """Graph feature extractor for Graph observation spaces.
@@ -20,6 +22,10 @@ class GraphFeaturesExtractor:
     - gnn: a 2-layers GCN
     - reduction layer: global_max_pool + linear layer + relu
 
+    This merely wraps `skdecide.hub.solver.utils.gnn.torch_layers.GraphFeaturesExtractor`
+    to make it a `stable_baselines3.common.torch_layers.BaseFeaturesExtractor`. See the
+    former's documentation for more details about its arguments.
+
     Args:
         observation_space:
         features_dim: Number of extracted features
@@ -45,75 +51,18 @@ def __init__(
         reduction_layer_class: Optional[type[nn.Module]] = None,
         reduction_layer_kwargs: Optional[dict[str, Any]] = None,
     ):
-
         super().__init__(observation_space, features_dim=features_dim)
-
-        if gnn_out_dim is None:
-            if gnn_class is None:
-                gnn_out_dim = 2 * features_dim
-            else:
-                raise ValueError(
-                    "`gnn_out_dim` cannot be None if `gnn` is not None, "
-                    "and should match `gnn` output."
-                )
-
-        if gnn_class is None:
-            node_features_dim = int(np.prod(observation_space.node_space.shape))
-            self.gnn = thg.nn.models.GCN(
-                in_channels=node_features_dim,
-                hidden_channels=gnn_out_dim,
-                num_layers=2,
-                dropout=0.2,
-            )
-        else:
-            if gnn_kwargs is None:
-                gnn_kwargs = {}
-            self.gnn = gnn_class(**gnn_kwargs)
-
-        if reduction_layer_class is None:
-            self.reduction_layer = _DefaultReductionLayer(
-                gnn_out_dim=gnn_out_dim, features_dim=features_dim
-            )
-        else:
-            if reduction_layer_kwargs is None:
-                reduction_layer_kwargs = {}
-            self.reduction_layer = reduction_layer_class(**reduction_layer_kwargs)
+        self._extractor = torch_layers.GraphFeaturesExtractor(
+            observation_space=observation_space,
+            features_dim=features_dim,
+            gnn_out_dim=gnn_out_dim,
+            gnn_class=gnn_class,
+            gnn_kwargs=gnn_kwargs,
+            reduction_layer_class=reduction_layer_class,
+            reduction_layer_kwargs=reduction_layer_kwargs,
+        )
 
     def forward(self, observations: thg.data.Data) -> th.Tensor:
-        x, edge_index, edge_attr, batch = (
-            observations.x,
-            observations.edge_index,
-            observations.edge_attr,
-            observations.batch,
-        )
-        # construct edge weights, for GNNs needing it, as the first edge feature
-        edge_weight = edge_attr[:, 0]
-        h = self.gnn(
-            x=x, edge_index=edge_index, edge_weight=edge_weight, edge_attr=edge_attr
-        )
-        embedded_observations = thg.data.Data(
-            x=h, edge_index=edge_index, edge_attr=edge_attr, batch=batch
-        )
-        h = self.reduction_layer(embedded_observations=embedded_observations)
-        return h
-
-
-class _DefaultReductionLayer(nn.Module):
-    def __init__(self, gnn_out_dim: int, features_dim: int):
-        super().__init__()
-        self.gnn_out_dim = gnn_out_dim
-        self.features_dim = features_dim
-        self.linear_layer = nn.Linear(gnn_out_dim, features_dim)
-
-    def forward(self, embedded_observations: thg.data.Data) -> th.Tensor:
-        x, edge_index, batch = (
-            embedded_observations.x,
-            embedded_observations.edge_index,
-            embedded_observations.batch,
-        )
-        h = global_max_pool(x, batch)
-        h = self.linear_layer(h).relu()
-        return h
+        return self._extractor.forward(observations=observations)
 
 
 class CombinedFeaturesExtractor(BaseFeaturesExtractor):
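
Since the sb3 wrapper keeps its BaseFeaturesExtractor interface, it can still be selected through the standard stable-baselines3 `policy_kwargs` mechanism. A hedged sketch (the GNN-aware algorithm consuming these kwargs is assumed, not shown in this diff):

```python
from skdecide.hub.solver.stable_baselines.gnn.common.torch_layers import (
    GraphFeaturesExtractor,
)

# Standard stable-baselines3 way of swapping in a custom features extractor;
# how these kwargs reach a graph-capable policy is assumed, not shown here.
policy_kwargs = dict(
    features_extractor_class=GraphFeaturesExtractor,
    features_extractor_kwargs=dict(features_dim=64),
)
```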
18 changes: 2 additions & 16 deletions skdecide/hub/solver/stable_baselines/gnn/common/utils.py
@@ -6,6 +6,8 @@
 import torch as th
 import torch_geometric as thg
 
+from skdecide.hub.solver.utils.gnn.torch_utils import graph_obs_to_thg_data
+
 SubObsType = Union[np.ndarray, gym.spaces.GraphInstance, list[gym.spaces.GraphInstance]]
 ObsType = Union[SubObsType, dict[str, SubObsType]]
 TorchSubObsType = Union[th.Tensor, thg.data.Data]
@@ -27,22 +29,6 @@ def copy_np_array_or_list_of_graph_instances(
     return np.copy(obs)
 
 
-def graph_obs_to_thg_data(
-    obs: gym.spaces.GraphInstance, device: th.device
-) -> thg.data.Data:
-    # Node features
-    flatten_node_features = obs.nodes.reshape((len(obs.nodes), -1))
-    x = th.tensor(flatten_node_features).float()
-    # Edge features
-    if obs.edges is None:
-        edge_attr = None
-    else:
-        flatten_edge_features = obs.edges.reshape((len(obs.edges), -1))
-        edge_attr = th.tensor(flatten_edge_features).float()
-    edge_index = th.tensor(obs.edge_links, dtype=th.long).t().contiguous().view(2, -1)
-    return thg.data.Data(x=x, edge_index=edge_index, edge_attr=edge_attr).to(device)
-
-
 def obs_as_tensor(
     obs: ObsType,
     device: th.device,
skdecide/hub/solver/utils/__init__.py

Empty file.
skdecide/hub/solver/utils/gnn/__init__.py

Empty file.
2 changes: 1 addition & 1 deletion skdecide/hub/solver/utils/gnn/torch_layers.py (renamed from skdecide/hub/solver/ray_rllib/gnn/torch_layers.py)
@@ -77,7 +77,7 @@ def __init__(
             reduction_layer_kwargs = {}
         self.reduction_layer = reduction_layer_class(**reduction_layer_kwargs)
 
-    def forward(self, observations) -> th.Tensor:
+    def forward(self, observations: thg.data.Data) -> th.Tensor:
         x, edge_index, edge_attr, batch = (
             observations.x,
             observations.edge_index,
29 changes: 29 additions & 0 deletions skdecide/hub/solver/utils/gnn/torch_utils.py
@@ -0,0 +1,29 @@
+from typing import Optional
+
+import gymnasium as gym
+import torch as th
+import torch_geometric as thg
+
+
+def graph_obs_to_thg_data(
+    obs: gym.spaces.GraphInstance,
+    device: Optional[th.device] = None,
+    pin_memory: bool = False,
+) -> thg.data.Data:
+    # Node features
+    flatten_node_features = obs.nodes.reshape((len(obs.nodes), -1))
+    x = th.tensor(flatten_node_features).float()
+    # Edge features
+    if obs.edges is None:
+        edge_attr = None
+    else:
+        flatten_edge_features = obs.edges.reshape((len(obs.edges), -1))
+        edge_attr = th.tensor(flatten_edge_features).float()
+    edge_index = th.tensor(obs.edge_links, dtype=th.long).t().contiguous().view(2, -1)
+    # thg.Data
+    data = thg.data.Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
+    # Pin the tensor's memory (for faster transfer to GPU later).
+    if pin_memory and th.cuda.is_available():
+        data.pin_memory()
+
+    return data if device is None else data.to(device)
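
The `pin_memory` flag only pays off when the Data object is built on CPU and later moved to GPU. A hedged usage sketch (the toy space and the `non_blocking` copy are assumptions about how a caller would use this, not part of the commit):

```python
import gymnasium as gym
import torch as th

from skdecide.hub.solver.utils.gnn.torch_utils import graph_obs_to_thg_data

# Toy observation (illustrative; any gym.spaces.GraphInstance works).
space = gym.spaces.Graph(
    node_space=gym.spaces.Box(low=-1.0, high=1.0, shape=(3,)),
    edge_space=gym.spaces.Box(low=0.0, high=1.0, shape=(1,)),
)
obs = space.sample(num_nodes=4, num_edges=6)

# Convert on CPU with pinned memory, then copy to GPU asynchronously;
# pinned pages allow the non-blocking host-to-device transfer.
data = graph_obs_to_thg_data(obs, pin_memory=True)
if th.cuda.is_available():
    data = data.to("cuda", non_blocking=True)

# Alternatively, convert and place on a device in one call:
device = th.device("cuda" if th.cuda.is_available() else "cpu")
data_alt = graph_obs_to_thg_data(obs, device=device)
```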
