diff --git a/CHANGELOG.md b/CHANGELOG.md
index efc3c0eca274..30901ee8a5f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -75,6 +75,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
+- Added `edge_attr` support in `CuGraphGATConv` ([#10383](https://github.com/pyg-team/pytorch_geometric/pull/10383))
 - Adapt `dgcnn_classification` example to work with `ModelNet` and `MedShapeNet` Datasets ([#9823](https://github.com/pyg-team/pytorch_geometric/pull/9823))
 - Chained exceptions explicitly instead of implicitly ([#10242](https://github.com/pyg-team/pytorch_geometric/pull/10242))
 - Updated cuGraph examples to use buffered sampling which keeps data in memory and is significantly faster than the deprecated buffered sampling ([#10079](https://github.com/pyg-team/pytorch_geometric/pull/10079))
diff --git a/docs/source/install/installation.rst b/docs/source/install/installation.rst
index 8edf7f22fdae..4cf4384dcf37 100644
--- a/docs/source/install/installation.rst
+++ b/docs/source/install/installation.rst
@@ -187,6 +187,13 @@ If :conda:`null` :obj:`conda` does not pick up the correct CUDA version of :pyg:
 
    conda install pyg=*=*cu* -c pyg
 
+Enabling Accelerated cuGraph GNNs
+---------------------------------
+
+Currently, NVIDIA recommends the `NVIDIA PyG Container <https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pyg>`_ for using the cuGraph integration in PyG.
+This functionality is planned to be enabled through cuDNN, which is part of PyTorch builds. We nevertheless recommend the NVIDIA PyG Container, as it provides the fastest and most stable build of the NVIDIA CUDA stack combined with PyTorch and PyG.
+
+
 Frequently Asked Questions
 --------------------------
 
diff --git a/test/nn/conv/cugraph/test_cugraph_gat_conv.py b/test/nn/conv/cugraph/test_cugraph_gat_conv.py
index 21def239b10b..069609938cda 100644
--- a/test/nn/conv/cugraph/test_cugraph_gat_conv.py
+++ b/test/nn/conv/cugraph/test_cugraph_gat_conv.py
@@ -11,9 +11,11 @@
 @pytest.mark.parametrize('bias', [True, False])
 @pytest.mark.parametrize('bipartite', [True, False])
 @pytest.mark.parametrize('concat', [True, False])
+@pytest.mark.parametrize('edge_attr', [True, False])
 @pytest.mark.parametrize('heads', [1, 2, 3])
 @pytest.mark.parametrize('max_num_neighbors', [8, None])
-def test_gat_conv_equality(bias, bipartite, concat, heads, max_num_neighbors):
+def test_gat_conv_equality(bias, bipartite, concat, edge_attr, heads,
+                           max_num_neighbors):
     in_channels, out_channels = 5, 2
     kwargs = dict(bias=bias, concat=concat)
 
@@ -32,17 +34,27 @@ def test_gat_conv_equality(bias, bipartite, concat, heads, max_num_neighbors):
     conv2.lin.weight.data[:, :] = conv1.lin.weight.data
     conv2.att.data[:heads * out_channels] = conv1.att_src.data.flatten()
     conv2.att.data[heads * out_channels:] = conv1.att_dst.data.flatten()
-    if bipartite:
-        out1 = conv1((x, x[:size[1]]), edge_index)
+    if edge_attr and not bipartite:
+        e_attrs = torch.randn(size=(edge_index.size(1), 10))
+        out1 = conv1(x, edge_index, edge_attr=e_attrs)
+        out2 = conv2(
+            x,
+            EdgeIndex(edge_index, sparse_size=size),
+            max_num_neighbors=max_num_neighbors,
+            edge_attr=e_attrs,
+        )
     else:
-        out1 = conv1(x, edge_index)
+        if bipartite:
+            out1 = conv1((x, x[:size[1]]), edge_index)
+        else:
+            out1 = conv1(x, edge_index)
 
-    out2 = conv2(
-        x,
-        EdgeIndex(edge_index, sparse_size=size),
-        max_num_neighbors=max_num_neighbors,
-    )
+        out2 = conv2(
+            x,
+            EdgeIndex(edge_index, sparse_size=size),
+            max_num_neighbors=max_num_neighbors,
+        )
 
     assert torch.allclose(out1, out2, atol=1e-3)
 
     grad_output = torch.rand_like(out1)
diff --git a/torch_geometric/nn/conv/cugraph/gat_conv.py b/torch_geometric/nn/conv/cugraph/gat_conv.py
index ecf83b40277c..576308d11871 100644
--- a/torch_geometric/nn/conv/cugraph/gat_conv.py
+++ b/torch_geometric/nn/conv/cugraph/gat_conv.py
@@ -26,6 +26,9 @@ class CuGraphGATConv(CuGraphModule):  # pragma: no cover
     :class:`~torch_geometric.nn.conv.GATConv` based on the :obj:`cugraph-ops`
     package that fuses message passing computation for accelerated execution
     and lower memory footprint.
+    The current method to enable :obj:`cugraph-ops` is to use the
+    `NVIDIA PyG Container
+    <https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pyg>`_.
     """
     def __init__(
         self,
@@ -67,6 +70,7 @@ def forward(
         self,
         x: Tensor,
         edge_index: EdgeIndex,
+        edge_attr: Optional[Tensor] = None,
         max_num_neighbors: Optional[int] = None,
     ) -> Tensor:
         graph = self.get_cugraph(edge_index, max_num_neighbors)
@@ -75,10 +79,12 @@
 
         if LEGACY_MODE:
             out = GATConvAgg(x, self.att, graph, self.heads, 'LeakyReLU',
-                             self.negative_slope, False, self.concat)
+                             self.negative_slope, False, self.concat,
+                             edge_feat=edge_attr)
         else:
             out = GATConvAgg(x, self.att, graph, self.heads, 'LeakyReLU',
-                             self.negative_slope, self.concat)
+                             self.negative_slope, self.concat,
+                             edge_feat=edge_attr)
 
         if self.bias is not None:
             out = out + self.bias
diff --git a/torch_geometric/nn/conv/cugraph/rgcn_conv.py b/torch_geometric/nn/conv/cugraph/rgcn_conv.py
index 23ca0e948436..259b744eeb0e 100644
--- a/torch_geometric/nn/conv/cugraph/rgcn_conv.py
+++ b/torch_geometric/nn/conv/cugraph/rgcn_conv.py
@@ -29,6 +29,9 @@ class CuGraphRGCNConv(CuGraphModule):  # pragma: no cover
     :class:`~torch_geometric.nn.conv.RGCNConv` based on the :obj:`cugraph-ops`
     package that fuses message passing computation for accelerated execution
     and lower memory footprint.
+    The current method to enable :obj:`cugraph-ops` is to use the
+    `NVIDIA PyG Container
+    <https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pyg>`_.
     """
     def __init__(self, in_channels: int, out_channels: int, num_relations: int,
                  num_bases: Optional[int] = None, aggr: str = 'mean',
diff --git a/torch_geometric/nn/conv/cugraph/sage_conv.py b/torch_geometric/nn/conv/cugraph/sage_conv.py
index 26b5f3688798..9eebac669f0e 100644
--- a/torch_geometric/nn/conv/cugraph/sage_conv.py
+++ b/torch_geometric/nn/conv/cugraph/sage_conv.py
@@ -27,6 +27,9 @@ class CuGraphSAGEConv(CuGraphModule):  # pragma: no cover
     :class:`~torch_geometric.nn.conv.SAGEConv` based on the :obj:`cugraph-ops`
     package that fuses message passing computation for accelerated execution
     and lower memory footprint.
+    The current method to enable :obj:`cugraph-ops` is to use the
+    `NVIDIA PyG Container
+    <https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pyg>`_.
     """
     def __init__(
        self,
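
Usage note (not part of the patch itself): the sketch below illustrates how the new `edge_attr` argument of `CuGraphGATConv.forward` is called after this change, mirroring the updated test above. The graph sizes and the 10-dimensional edge features are illustrative assumptions only; `cugraph-ops` kernels require a CUDA device (e.g. inside the NVIDIA PyG Container), and whether raw edge features of an arbitrary dimension are accepted ultimately depends on the underlying `GATConvAgg` kernel:

    import torch

    from torch_geometric import EdgeIndex
    from torch_geometric.nn import CuGraphGATConv

    # Toy graph with 10 nodes, 40 random edges, and 10-dimensional edge
    # features; the sizes mirror the test and are not prescribed by the PR.
    num_nodes, num_edges = 10, 40
    x = torch.rand(num_nodes, 5, device='cuda')
    edge_index = torch.randint(num_nodes, (2, num_edges), device='cuda')
    edge_attr = torch.rand(num_edges, 10, device='cuda')

    conv = CuGraphGATConv(in_channels=5, out_channels=2, heads=2).cuda()
    out = conv(
        x,
        EdgeIndex(edge_index, sparse_size=(num_nodes, num_nodes)),
        edge_attr=edge_attr,  # new keyword argument introduced by this PR
        max_num_neighbors=8,
    )
    print(out.shape)  # torch.Size([10, 4]) for heads=2, out_channels=2, concat=True

Since `edge_attr` is passed by keyword and defaults to `None`, existing calls that omit edge features continue to work unchanged.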