Unverified Commit 863c2495 authored by Yang Su's avatar Yang Su Committed by GitHub
Browse files

Fix User Guide EN and CN typo (#3495)



* Update graph-heterogeneous.rst

`tensor([0, 1, 2, 0, 1, 2])` should be rendered as output rather than as an input code line

* Update message-api.rst

`updata_all_example()` should be `update_all_example()`

* Update message-efficient.rst

`cat_feat` needs to be concatenated with `dim=1` so that the number of edge features matches the number of edges

* Update nn-construction.rst

all `max_pool` in the aggregator type of `SAGEConv` should be `pool` instead

* Update graph-heterogeneous.rst

`tensor([0, 1, 2, 0, 1, 2])` should be rendered as output rather than as an input code line

* Update message-api.rst

`updata_all_example()` should be `update_all_example()`

* Update message-efficient.rst

`cat_feat` needs to be concatenated with `dim=1` so that the number of edge features matches the number of edges

* Update nn-construction.rst

all `max_pool` in the aggregator type of `SAGEConv` should be `pool` instead

* Update nn-forward.rst

all `max_pool` in the aggregator type of `SAGEConv` should be `pool` instead

* Update nn-forward.rst

all `max_pool` in the aggregator type of `SAGEConv` should be `pool` instead
Co-authored-by: default avatarzhjwy9343 <6593865@qq.com>
parent 473d5e0a
...@@ -251,7 +251,7 @@ The original node/edge types and type-specific IDs are stored in :py:attr:`~dgl. ...@@ -251,7 +251,7 @@ The original node/edge types and type-specific IDs are stored in :py:attr:`~dgl.
tensor([0, 0, 0, 1, 1, 1]) tensor([0, 0, 0, 1, 1, 1])
>>> # Original type-specific node IDs >>> # Original type-specific node IDs
>>> hg.ndata[dgl.NID] >>> hg.ndata[dgl.NID]
>>> tensor([0, 1, 2, 0, 1, 2]) tensor([0, 1, 2, 0, 1, 2])
>>> # Order of edge types in the heterograph >>> # Order of edge types in the heterograph
>>> g.etypes >>> g.etypes
......
...@@ -82,7 +82,7 @@ example: ...@@ -82,7 +82,7 @@ example:
.. code:: .. code::
def updata_all_example(graph): def update_all_example(graph):
# store the result in graph.ndata['ft'] # store the result in graph.ndata['ft']
graph.update_all(fn.u_mul_e('ft', 'a', 'm'), graph.update_all(fn.u_mul_e('ft', 'a', 'm'),
fn.sum('m', 'ft')) fn.sum('m', 'ft'))
...@@ -105,4 +105,4 @@ compute capacity requirement of ``sm_53`` (Pascal, Volta, Turing and Ampere ...@@ -105,4 +105,4 @@ compute capacity requirement of ``sm_53`` (Pascal, Volta, Turing and Ampere
architectures). architectures).
User can enable float16 for mixed precision training by compiling DGL from source User can enable float16 for mixed precision training by compiling DGL from source
(see :doc:`Mixed Precision Training <mixed_precision>` tutorial for details). (see :doc:`Mixed Precision Training <mixed_precision>` tutorial for details).
\ No newline at end of file
...@@ -32,7 +32,7 @@ implementation would be like: ...@@ -32,7 +32,7 @@ implementation would be like:
linear = nn.Parameter(torch.FloatTensor(size=(node_feat_dim * 2, out_dim))) linear = nn.Parameter(torch.FloatTensor(size=(node_feat_dim * 2, out_dim)))
def concat_message_function(edges): def concat_message_function(edges):
return {'cat_feat': torch.cat([edges.src['feat'], edges.dst['feat']])} return {'cat_feat': torch.cat([edges.src['feat'], edges.dst['feat']], dim=1)}
g.apply_edges(concat_message_function) g.apply_edges(concat_message_function)
g.edata['out'] = g.edata['cat_feat'] @ linear g.edata['out'] = g.edata['cat_feat'] @ linear
......
...@@ -52,14 +52,14 @@ SAGEConv paper, such normalization can be l2 normalization: ...@@ -52,14 +52,14 @@ SAGEConv paper, such normalization can be l2 normalization:
.. code:: .. code::
# aggregator type: mean, max_pool, lstm, gcn # aggregator type: mean, pool, lstm, gcn
if aggregator_type not in ['mean', 'max_pool', 'lstm', 'gcn']: if aggregator_type not in ['mean', 'pool', 'lstm', 'gcn']:
raise KeyError('Aggregator type {} not supported.'.format(aggregator_type)) raise KeyError('Aggregator type {} not supported.'.format(aggregator_type))
if aggregator_type == 'max_pool': if aggregator_type == 'pool':
self.fc_pool = nn.Linear(self._in_src_feats, self._in_src_feats) self.fc_pool = nn.Linear(self._in_src_feats, self._in_src_feats)
if aggregator_type == 'lstm': if aggregator_type == 'lstm':
self.lstm = nn.LSTM(self._in_src_feats, self._in_src_feats, batch_first=True) self.lstm = nn.LSTM(self._in_src_feats, self._in_src_feats, batch_first=True)
if aggregator_type in ['mean', 'max_pool', 'lstm']: if aggregator_type in ['mean', 'pool', 'lstm']:
self.fc_self = nn.Linear(self._in_dst_feats, out_feats, bias=bias) self.fc_self = nn.Linear(self._in_dst_feats, out_feats, bias=bias)
self.fc_neigh = nn.Linear(self._in_src_feats, out_feats, bias=bias) self.fc_neigh = nn.Linear(self._in_src_feats, out_feats, bias=bias)
self.reset_parameters() self.reset_parameters()
...@@ -75,7 +75,7 @@ function, weight initialization is applied by calling ...@@ -75,7 +75,7 @@ function, weight initialization is applied by calling
def reset_parameters(self): def reset_parameters(self):
"""Reinitialize learnable parameters.""" """Reinitialize learnable parameters."""
gain = nn.init.calculate_gain('relu') gain = nn.init.calculate_gain('relu')
if self._aggre_type == 'max_pool': if self._aggre_type == 'pool':
nn.init.xavier_uniform_(self.fc_pool.weight, gain=gain) nn.init.xavier_uniform_(self.fc_pool.weight, gain=gain)
if self._aggre_type == 'lstm': if self._aggre_type == 'lstm':
self.lstm.reset_parameters() self.lstm.reset_parameters()
......
...@@ -125,7 +125,7 @@ Message passing and reducing ...@@ -125,7 +125,7 @@ Message passing and reducing
# divide in_degrees # divide in_degrees
degs = graph.in_degrees().to(feat_dst) degs = graph.in_degrees().to(feat_dst)
h_neigh = (graph.dstdata['neigh'] + graph.dstdata['h']) / (degs.unsqueeze(-1) + 1) h_neigh = (graph.dstdata['neigh'] + graph.dstdata['h']) / (degs.unsqueeze(-1) + 1)
elif self._aggre_type == 'max_pool': elif self._aggre_type == 'pool':
graph.srcdata['h'] = F.relu(self.fc_pool(feat_src)) graph.srcdata['h'] = F.relu(self.fc_pool(feat_src))
graph.update_all(fn.copy_u('h', 'm'), fn.max('m', 'neigh')) graph.update_all(fn.copy_u('h', 'm'), fn.max('m', 'neigh'))
h_neigh = graph.dstdata['neigh'] h_neigh = graph.dstdata['neigh']
......
...@@ -235,7 +235,7 @@ DGL提供了 :func:`dgl.save_graphs` 和 :func:`dgl.load_graphs` 函数,分别 ...@@ -235,7 +235,7 @@ DGL提供了 :func:`dgl.save_graphs` 和 :func:`dgl.load_graphs` 函数,分别
tensor([0, 0, 0, 1, 1, 1]) tensor([0, 0, 0, 1, 1, 1])
>>> # 原始的特定类型节点ID >>> # 原始的特定类型节点ID
>>> hg.ndata[dgl.NID] >>> hg.ndata[dgl.NID]
>>> tensor([0, 1, 2, 0, 1, 2]) tensor([0, 1, 2, 0, 1, 2])
>>> # 异构图中边类型的顺序 >>> # 异构图中边类型的顺序
>>> g.etypes >>> g.etypes
......
...@@ -62,7 +62,7 @@ DGL支持内置的聚合函数 ``sum``、 ``max``、 ``min`` 和 ``mean`` 操作 ...@@ -62,7 +62,7 @@ DGL支持内置的聚合函数 ``sum``、 ``max``、 ``min`` 和 ``mean`` 操作
.. code:: .. code::
def updata_all_example(graph): def update_all_example(graph):
# 在graph.ndata['ft']中存储结果 # 在graph.ndata['ft']中存储结果
graph.update_all(fn.u_mul_e('ft', 'a', 'm'), graph.update_all(fn.u_mul_e('ft', 'a', 'm'),
fn.sum('m', 'ft')) fn.sum('m', 'ft'))
......
...@@ -22,7 +22,7 @@ DGL建议用户尽量减少边的特征维数。 ...@@ -22,7 +22,7 @@ DGL建议用户尽量减少边的特征维数。
linear = nn.Parameter(torch.FloatTensor(size=(node_feat_dim * 2, out_dim))) linear = nn.Parameter(torch.FloatTensor(size=(node_feat_dim * 2, out_dim)))
def concat_message_function(edges): def concat_message_function(edges):
return {'cat_feat': torch.cat([edges.src.ndata['feat'], edges.dst.ndata['feat']])} return {'cat_feat': torch.cat([edges.src.ndata['feat'], edges.dst.ndata['feat']], dim=1)}
g.apply_edges(concat_message_function) g.apply_edges(concat_message_function)
g.edata['out'] = g.edata['cat_feat'] @ linear g.edata['out'] = g.edata['cat_feat'] @ linear
......
...@@ -44,14 +44,14 @@ ...@@ -44,14 +44,14 @@
.. code:: .. code::
# 聚合类型:mean、max_pool、lstm、gcn # 聚合类型:mean、pool、lstm、gcn
if aggregator_type not in ['mean', 'max_pool', 'lstm', 'gcn']: if aggregator_type not in ['mean', 'pool', 'lstm', 'gcn']:
raise KeyError('Aggregator type {} not supported.'.format(aggregator_type)) raise KeyError('Aggregator type {} not supported.'.format(aggregator_type))
if aggregator_type == 'max_pool': if aggregator_type == 'pool':
self.fc_pool = nn.Linear(self._in_src_feats, self._in_src_feats) self.fc_pool = nn.Linear(self._in_src_feats, self._in_src_feats)
if aggregator_type == 'lstm': if aggregator_type == 'lstm':
self.lstm = nn.LSTM(self._in_src_feats, self._in_src_feats, batch_first=True) self.lstm = nn.LSTM(self._in_src_feats, self._in_src_feats, batch_first=True)
if aggregator_type in ['mean', 'max_pool', 'lstm']: if aggregator_type in ['mean', 'pool', 'lstm']:
self.fc_self = nn.Linear(self._in_dst_feats, out_feats, bias=bias) self.fc_self = nn.Linear(self._in_dst_feats, out_feats, bias=bias)
self.fc_neigh = nn.Linear(self._in_src_feats, out_feats, bias=bias) self.fc_neigh = nn.Linear(self._in_src_feats, out_feats, bias=bias)
self.reset_parameters() self.reset_parameters()
...@@ -64,7 +64,7 @@ ...@@ -64,7 +64,7 @@
def reset_parameters(self): def reset_parameters(self):
"""重新初始化可学习的参数""" """重新初始化可学习的参数"""
gain = nn.init.calculate_gain('relu') gain = nn.init.calculate_gain('relu')
if self._aggre_type == 'max_pool': if self._aggre_type == 'pool':
nn.init.xavier_uniform_(self.fc_pool.weight, gain=gain) nn.init.xavier_uniform_(self.fc_pool.weight, gain=gain)
if self._aggre_type == 'lstm': if self._aggre_type == 'lstm':
self.lstm.reset_parameters() self.lstm.reset_parameters()
......
...@@ -105,7 +105,7 @@ SAGEConv的数学公式如下: ...@@ -105,7 +105,7 @@ SAGEConv的数学公式如下:
# 除以入度 # 除以入度
degs = graph.in_degrees().to(feat_dst) degs = graph.in_degrees().to(feat_dst)
h_neigh = (graph.dstdata['neigh'] + graph.dstdata['h']) / (degs.unsqueeze(-1) + 1) h_neigh = (graph.dstdata['neigh'] + graph.dstdata['h']) / (degs.unsqueeze(-1) + 1)
elif self._aggre_type == 'max_pool': elif self._aggre_type == 'pool':
graph.srcdata['h'] = F.relu(self.fc_pool(feat_src)) graph.srcdata['h'] = F.relu(self.fc_pool(feat_src))
graph.update_all(fn.copy_u('h', 'm'), fn.max('m', 'neigh')) graph.update_all(fn.copy_u('h', 'm'), fn.max('m', 'neigh'))
h_neigh = graph.dstdata['neigh'] h_neigh = graph.dstdata['neigh']
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment