Unverified commit 6383e649, authored by Kay Liu, committed by GitHub
Browse files

[Doc] correct the matrix multiplication symbols (#2961)



* correct the matrix multiplication symbols

* modified dimensionality of linear
Co-authored-by: Minjie Wang <wmjlyjemaine@gmail.com>
parent 7a816f47
...@@ -30,11 +30,11 @@ implementation would be like: ...@@ -30,11 +30,11 @@ implementation would be like:
import torch import torch
import torch.nn as nn import torch.nn as nn
linear = nn.Parameter(torch.FloatTensor(size=(1, node_feat_dim * 2))) linear = nn.Parameter(torch.FloatTensor(size=(node_feat_dim * 2, 1)))
def concat_message_function(edges): def concat_message_function(edges):
return {'cat_feat': torch.cat([edges.src['feat'], edges.dst['feat']])} return {'cat_feat': torch.cat([edges.src['feat'], edges.dst['feat']])}
g.apply_edges(concat_message_function) g.apply_edges(concat_message_function)
g.edata['out'] = g.edata['cat_feat'] * linear g.edata['out'] = g.edata['cat_feat'] @ linear
The suggested implementation splits the linear operation into two, The suggested implementation splits the linear operation into two,
one applies on ``src`` feature, the other applies on ``dst`` feature. one applies on ``src`` feature, the other applies on ``dst`` feature.
...@@ -48,10 +48,10 @@ respectively: ...@@ -48,10 +48,10 @@ respectively:
import dgl.function as fn import dgl.function as fn
linear_src = nn.Parameter(torch.FloatTensor(size=(1, node_feat_dim))) linear_src = nn.Parameter(torch.FloatTensor(size=(node_feat_dim, 1)))
linear_dst = nn.Parameter(torch.FloatTensor(size=(1, node_feat_dim))) linear_dst = nn.Parameter(torch.FloatTensor(size=(node_feat_dim, 1)))
out_src = g.ndata['feat'] * linear_src out_src = g.ndata['feat'] @ linear_src
out_dst = g.ndata['feat'] * linear_dst out_dst = g.ndata['feat'] @ linear_dst
g.srcdata.update({'out_src': out_src}) g.srcdata.update({'out_src': out_src})
g.dstdata.update({'out_dst': out_dst}) g.dstdata.update({'out_dst': out_dst})
g.apply_edges(fn.u_add_v('out_src', 'out_dst', 'out')) g.apply_edges(fn.u_add_v('out_src', 'out_dst', 'out'))
......
...@@ -20,11 +20,11 @@ DGL建议用户尽量减少边的特征维数。 ...@@ -20,11 +20,11 @@ DGL建议用户尽量减少边的特征维数。
import torch import torch
import torch.nn as nn import torch.nn as nn
linear = nn.Parameter(torch.FloatTensor(size=(1, node_feat_dim * 2))) linear = nn.Parameter(torch.FloatTensor(size=(node_feat_dim * 2, 1)))
def concat_message_function(edges): def concat_message_function(edges):
return {'cat_feat': torch.cat([edges.src.ndata['feat'], edges.dst.ndata['feat']])} return {'cat_feat': torch.cat([edges.src.ndata['feat'], edges.dst.ndata['feat']])}
g.apply_edges(concat_message_function) g.apply_edges(concat_message_function)
g.edata['out'] = g.edata['cat_feat'] * linear g.edata['out'] = g.edata['cat_feat'] @ linear
建议的实现是将线性操作分成两部分,一个应用于 ``源`` 节点特征,另一个应用于 ``目标`` 节点特征。 建议的实现是将线性操作分成两部分,一个应用于 ``源`` 节点特征,另一个应用于 ``目标`` 节点特征。
在最后一个阶段,在边上将以上两部分线性操作的结果相加,即执行 :math:`W_l \times u + W_r \times v`, 在最后一个阶段,在边上将以上两部分线性操作的结果相加,即执行 :math:`W_l \times u + W_r \times v`,
...@@ -35,10 +35,10 @@ DGL建议用户尽量减少边的特征维数。 ...@@ -35,10 +35,10 @@ DGL建议用户尽量减少边的特征维数。
import dgl.function as fn import dgl.function as fn
linear_src = nn.Parameter(torch.FloatTensor(size=(1, node_feat_dim))) linear_src = nn.Parameter(torch.FloatTensor(size=(node_feat_dim, 1)))
linear_dst = nn.Parameter(torch.FloatTensor(size=(1, node_feat_dim))) linear_dst = nn.Parameter(torch.FloatTensor(size=(node_feat_dim, 1)))
out_src = g.ndata['feat'] * linear_src out_src = g.ndata['feat'] @ linear_src
out_dst = g.ndata['feat'] * linear_dst out_dst = g.ndata['feat'] @ linear_dst
g.srcdata.update({'out_src': out_src}) g.srcdata.update({'out_src': out_src})
g.dstdata.update({'out_dst': out_dst}) g.dstdata.update({'out_dst': out_dst})
g.apply_edges(fn.u_add_v('out_src', 'out_dst', 'out')) g.apply_edges(fn.u_add_v('out_src', 'out_dst', 'out'))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment