Unverified Commit 6d9433b0 authored by Mufei Li, committed by GitHub

[Transform] [Doc] Rename transform to transforms and update doc (#3765)

* Update

* Update

* Update

* Fix

* Update

* Update

* Update

* Fix
parent ccaa0bf2
"""Modules that transforms between graphs and between graph and tensors."""
import torch.nn as nn
-from ...transform import knn_graph, segmented_knn_graph
+from ...transforms import knn_graph, segmented_knn_graph
def pairwise_squared_distance(x):
'''
@@ -12,11 +12,7 @@ def pairwise_squared_distance(x):
class KNNGraph(nn.Module):
-r"""
-Description
------------
-Layer that transforms one point set into a graph, or a batch of
+r"""Layer that transforms one point set into a graph, or a batch of
point sets with the same number of points into a union of those graphs.
The KNNGraph is implemented in the following steps:
@@ -127,11 +123,7 @@ class KNNGraph(nn.Module):
class SegmentedKNNGraph(nn.Module):
-r"""
-Description
------------
-Layer that transforms one point set into a graph, or a batch of
+r"""Layer that transforms one point set into a graph, or a batch of
point sets with different number of points into a union of those graphs.
If a batch of point sets is provided, then the point :math:`j` in the point
......
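For context beyond the diff, a minimal usage sketch of the module renamed above; the point set and sizes are hypothetical, and the call follows the documented forward signature:
>>> import torch
>>> from dgl.nn import KNNGraph
>>> knn_g = KNNGraph(4)            # connect each point to its 4 nearest neighbors
>>> x = torch.rand(32, 3)          # a single point set: 32 points in 3-D
>>> g = knn_g(x)                   # a DGLGraph with 32 nodes and 32 * 4 edges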
@@ -15,11 +15,7 @@ __all__ = ['SumPooling', 'AvgPooling', 'MaxPooling', 'SortPooling',
'SetTransformerEncoder', 'SetTransformerDecoder', 'WeightAndSum']
class SumPooling(nn.Module):
r"""
Description
-----------
Apply sum pooling over the nodes in a graph .
r"""Apply sum pooling over the nodes in a graph.
.. math::
r^{(i)} = \sum_{k=1}^{N_i} x^{(i)}_k
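A quick sketch of how this readout is called (graph and feature sizes are hypothetical):
>>> import dgl
>>> import torch
>>> from dgl.nn import SumPooling
>>> g = dgl.rand_graph(5, 10)      # 5 nodes, 10 random edges
>>> feat = torch.rand(5, 16)
>>> sumpool = SumPooling()
>>> sumpool(g, feat).shape         # one 16-d readout vector per graph
torch.Size([1, 16])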
@@ -100,11 +96,7 @@ class SumPooling(nn.Module):
class AvgPooling(nn.Module):
-r"""
-Description
------------
-Apply average pooling over the nodes in a graph.
+r"""Apply average pooling over the nodes in a graph.
.. math::
r^{(i)} = \frac{1}{N_i}\sum_{k=1}^{N_i} x^{(i)}_k
@@ -185,11 +177,7 @@ class AvgPooling(nn.Module):
class MaxPooling(nn.Module):
-r"""
-Description
------------
-Apply max pooling over the nodes in a graph.
+r"""Apply max pooling over the nodes in a graph.
.. math::
r^{(i)} = \max_{k=1}^{N_i}\left( x^{(i)}_k \right)
@@ -268,13 +256,10 @@ class MaxPooling(nn.Module):
class SortPooling(nn.Module):
-r"""
+r"""Sort Pooling from `An End-to-End Deep Learning Architecture for Graph Classification
+<https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__
-Description
------------
-Apply Sort Pooling (`An End-to-End Deep Learning Architecture for Graph Classification
-<https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__) over the nodes in a graph.
-Sort Pooling first sorts the node features in ascending order along the feature dimension,
+It first sorts the node features in ascending order along the feature dimension,
and selects the sorted features of top-k nodes (ranked by the largest value of each node).
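To make the top-k selection concrete, a small hypothetical sketch:
>>> import dgl
>>> import torch
>>> from dgl.nn import SortPooling
>>> g = dgl.rand_graph(6, 12)
>>> feat = torch.rand(6, 5)
>>> sortpool = SortPooling(k=2)    # keep the top-2 nodes after sorting
>>> sortpool(g, feat).shape        # output is flattened to (1, k * D)
torch.Size([1, 10])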
Parameters
@@ -345,7 +330,7 @@ class SortPooling(nn.Module):
graph : DGLGraph
A DGLGraph or a batch of DGLGraphs.
feat : torch.Tensor
-The input feature with shape :math:`(N, D)`, where :math:`N` is the
+The input node feature with shape :math:`(N, D)`, where :math:`N` is the
number of nodes in the graph, and :math:`D` means the size of features.
Returns
@@ -365,12 +350,8 @@ class SortPooling(nn.Module):
class GlobalAttentionPooling(nn.Module):
-r"""
-Description
------------
-Apply Global Attention Pooling (`Gated Graph Sequence Neural Networks
-<https://arxiv.org/abs/1511.05493.pdf>`__) over the nodes in a graph.
+r"""Global Attention Pooling from `Gated Graph Sequence Neural Networks
+<https://arxiv.org/abs/1511.05493>`__
.. math::
r^{(i)} = \sum_{k=1}^{N_i}\mathrm{softmax}\left(f_{gate}
@@ -448,7 +429,7 @@ class GlobalAttentionPooling(nn.Module):
graph : DGLGraph
A DGLGraph or a batch of DGLGraphs.
feat : torch.Tensor
-The input feature with shape :math:`(N, D)` where :math:`N` is the
+The input node feature with shape :math:`(N, D)` where :math:`N` is the
number of nodes in the graph, and :math:`D` means the size of features.
Returns
@@ -474,10 +455,9 @@ class GlobalAttentionPooling(nn.Module):
class Set2Set(nn.Module):
-r"""
+r"""Set2Set operator from `Order Matters: Sequence to sequence for sets
+<https://arxiv.org/pdf/1511.06391.pdf>`__
-Description
------------
For each individual graph in the batch, set2set computes
.. math::
@@ -641,7 +621,7 @@ def _gen_mask(lengths_x, lengths_y, max_len_x, max_len_y):
class MultiHeadAttention(nn.Module):
-r"""Multi-Head Attention block, used in Transformer, Set Transformer and so on.
+r"""Multi-Head Attention block, used in Transformer, Set Transformer and so on
Parameters
----------
@@ -754,7 +734,8 @@ class MultiHeadAttention(nn.Module):
class SetAttentionBlock(nn.Module):
-r"""SAB block introduced in Set-Transformer paper.
+r"""SAB block from `Set Transformer: A Framework for Attention-based
+Permutation-Invariant Neural Networks <https://arxiv.org/abs/1810.00825>`__
Parameters
----------
@@ -795,7 +776,8 @@ class SetAttentionBlock(nn.Module):
class InducedSetAttentionBlock(nn.Module):
-r"""ISAB block introduced in Set-Transformer paper.
+r"""ISAB block from `Set Transformer: A Framework for Attention-based
+Permutation-Invariant Neural Networks <https://arxiv.org/abs/1810.00825>`__
Parameters
----------
@@ -867,7 +849,8 @@ class InducedSetAttentionBlock(nn.Module):
class PMALayer(nn.Module):
-r"""Pooling by Multihead Attention, used as the Decoder Module in Set Transformer.
+r"""Pooling by Multihead Attention from `Set Transformer: A Framework for Attention-based
+Permutation-Invariant Neural Networks <https://arxiv.org/abs/1810.00825>`__
Parameters
----------
@@ -943,12 +926,8 @@ class PMALayer(nn.Module):
class SetTransformerEncoder(nn.Module):
-r"""
-Description
------------
-The Encoder module in `Set Transformer: A Framework for Attention-based
-Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__.
+r"""The Encoder module from `Set Transformer: A Framework for Attention-based
+Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__
Parameters
----------
@@ -1079,12 +1058,8 @@ class SetTransformerEncoder(nn.Module):
class SetTransformerDecoder(nn.Module):
-r"""
-Description
------------
-The Decoder module in `Set Transformer: A Framework for Attention-based
-Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__.
+r"""The Decoder module from `Set Transformer: A Framework for Attention-based
+Permutation-Invariant Neural Networks <https://arxiv.org/pdf/1810.00825.pdf>`__
Parameters
----------
......
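The readout and set-attention modules above share the same call pattern; as one hedged example, a Set2Set sketch with hypothetical sizes (constructor arguments: input dimension, number of LSTM iterations, number of LSTM layers):
>>> import dgl
>>> import torch
>>> from dgl.nn import Set2Set
>>> g = dgl.rand_graph(5, 10)
>>> feat = torch.rand(5, 16)
>>> s2s = Set2Set(16, n_iters=3, n_layers=1)
>>> s2s(g, feat).shape             # Set2Set doubles the feature size
torch.Size([1, 32])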
@@ -5,12 +5,9 @@ import torch.nn as nn
import torch.nn.functional as F
class EdgePredictor(nn.Module):
r"""
r"""Predictor/score function for pairs of node representations
Description
-----------
Predictor/score function for pairs of node representations. Given a pair of node
representations, :math:`h_i` and :math:`h_j`, it combines them with
Given a pair of node representations, :math:`h_i` and :math:`h_j`, it combines them with
**dot product**
......
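A sketch of the dot-product mode described above, assuming the op name is passed as the first constructor argument (tensor sizes hypothetical):
>>> import torch
>>> from dgl.nn import EdgePredictor
>>> h_src = torch.randn(5, 16)     # representations of 5 source nodes
>>> h_dst = torch.randn(5, 16)     # representations of 5 destination nodes
>>> predictor = EdgePredictor('dot')
>>> scores = predictor(h_src, h_dst)   # one score per node pair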
@@ -4,12 +4,9 @@ import torch
import torch.nn as nn
class TransE(nn.Module):
r"""
r"""Similarity measure from `Translating Embeddings for Modeling Multi-relational Data
<https://papers.nips.cc/paper/2013/hash/1cecc7a77928ca8133fa24680a88d2f9-Abstract.html>`__
Description
-----------
Similarity measure introduced in `Translating Embeddings for Modeling Multi-relational Data
<https://papers.nips.cc/paper/2013/hash/1cecc7a77928ca8133fa24680a88d2f9-Abstract.html>`__.
Mathematically, it is defined as follows:
.. math::
......
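A hedged usage sketch, under the assumption that the module is built from the number of relations and the embedding size, and scores (head, tail, relation-ID) triples:
>>> import torch
>>> from dgl.nn import TransE
>>> scorer = TransE(num_rels=3, feats=4)
>>> h_head = torch.randn(5, 4)
>>> h_tail = torch.randn(5, 4)
>>> rels = torch.randint(low=0, high=3, size=(5,))
>>> scores = scorer(h_head, h_tail, rels)   # one similarity score per triple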
@@ -4,14 +4,11 @@ import torch
import torch.nn as nn
class TransR(nn.Module):
r"""
Description
-----------
Similarity measure introduced in
r"""Similarity measure from
`Learning entity and relation embeddings for knowledge graph completion
<https://ojs.aaai.org/index.php/AAAI/article/view/9491>`__. Mathematically,
it is defined as follows:
<https://ojs.aaai.org/index.php/AAAI/article/view/9491>`__
Mathematically, it is defined as follows:
.. math::
......
@@ -104,11 +104,7 @@ class Identity(nn.Module):
return x
class Sequential(nn.Sequential):
r"""
Description
-----------
A sequential container for stacking graph neural network modules.
r"""A sequential container for stacking graph neural network modules
DGL supports two modes: sequentially apply GNN modules on 1) the same graph or
2) a list of given graphs. In the second case, the number of graphs equals the
@@ -227,11 +223,10 @@ class Sequential(nn.Sequential):
return feats
class WeightBasis(nn.Module):
r"""Basis decomposition module.
r"""Basis decomposition from `Modeling Relational Data with Graph
Convolutional Networks <https://arxiv.org/abs/1703.06103>`__
Basis decomposition is introduced in "`Modeling Relational Data with Graph
Convolutional Networks <https://arxiv.org/abs/1703.06103>`__"
and can be described as below:
It can be described as below:
.. math::
@@ -284,13 +279,10 @@ class WeightBasis(nn.Module):
return weight.view(self.num_outputs, *self.shape)
class JumpingKnowledge(nn.Module):
r"""
r"""The Jumping Knowledge aggregation module from `Representation Learning on
Graphs with Jumping Knowledge Networks <https://arxiv.org/abs/1806.03536>`__
Description
-----------
The Jumping Knowledge aggregation module introduced in `Representation Learning on
Graphs with Jumping Knowledge Networks <https://arxiv.org/abs/1806.03536>`__. It
aggregates the output representations of multiple GNN layers with
It aggregates the output representations of multiple GNN layers with
**concatenation**
......
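A concatenation-mode sketch (the inputs below are hypothetical random tensors standing in for the outputs of successive GNN layers):
>>> import torch
>>> from dgl.nn import JumpingKnowledge
>>> jk = JumpingKnowledge(mode='cat')
>>> feat_list = [torch.randn(4, 8) for _ in range(3)]   # outputs of 3 GNN layers
>>> jk(feat_list).shape            # 'cat' concatenates along the feature axis
torch.Size([4, 24])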
@@ -9,8 +9,8 @@ from .... import function as fn
class APPNPConv(layers.Layer):
r"""Approximate Personalized Propagation of Neural Predictions
-layer from paper `Predict then Propagate: Graph Neural Networks
-meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__.
+layer from `Predict then Propagate: Graph Neural Networks
+meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__
.. math::
H^{0} & = X
......
@@ -9,13 +9,9 @@ from .... import broadcast_nodes, function as fn
class ChebConv(layers.Layer):
-r"""
-Description
------------
-Chebyshev Spectral Graph Convolution layer from paper `Convolutional
+r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
Neural Networks on Graphs with Fast Localized Spectral Filtering
-<https://arxiv.org/pdf/1606.09375.pdf>`__.
+<https://arxiv.org/pdf/1606.09375.pdf>`__
.. math::
h_i^{l+1} &= \sum_{k=0}^{K-1} W^{k, l}z_i^{k, l}
@@ -30,7 +26,6 @@ class ChebConv(layers.Layer):
where :math:`\tilde{A}` is :math:`A` + :math:`I`, :math:`W` is learnable weight.
Parameters
----------
in_feats: int
@@ -79,11 +74,7 @@ class ChebConv(layers.Layer):
self.linear = layers.Dense(out_feats, use_bias=bias)
def call(self, graph, feat, lambda_max=None):
r"""
Description
-----------
Compute ChebNet layer.
r"""Compute ChebNet layer.
Parameters
----------
......
@@ -6,13 +6,9 @@ import numpy as np
class DenseChebConv(layers.Layer):
-r"""
-Description
------------
-Chebyshev Spectral Graph Convolution layer from paper `Convolutional
+r"""Chebyshev Spectral Graph Convolution layer from `Convolutional
Neural Networks on Graphs with Fast Localized Spectral Filtering
-<https://arxiv.org/pdf/1606.09375.pdf>`__.
+<https://arxiv.org/pdf/1606.09375.pdf>`__
We recommend to use this module when applying ChebConv on dense graphs.
@@ -57,11 +53,7 @@ class DenseChebConv(layers.Layer):
self.bias = None
def call(self, adj, feat, lambda_max=None):
r"""
Description
-----------
Compute (Dense) Chebyshev Spectral Graph Convolution layer.
r"""Compute (Dense) Chebyshev Spectral Graph Convolution layer.
Parameters
----------
......
@@ -9,21 +9,25 @@ from ....utils import expand_as_pair
class EdgeConv(layers.Layer):
-r"""
-Description
------------
-EdgeConv layer.
-Introduced in "`Dynamic Graph CNN for Learning on Point Clouds
-<https://arxiv.org/pdf/1801.07829>`__". Can be described as follows:
+r"""EdgeConv layer from `Dynamic Graph CNN for Learning on Point Clouds
+<https://arxiv.org/pdf/1801.07829>`__
+It can be described as follows:
.. math::
h_i^{(l+1)} = \max_{j \in \mathcal{N}(i)} (
\Theta \cdot (h_j^{(l)} - h_i^{(l)}) + \Phi \cdot h_i^{(l)})
-where :math:`\mathcal{N}(i)` is the neighbor of :math:`i`.
+where :math:`\mathcal{N}(i)` is the neighbor of :math:`i`,
+:math:`\Theta` and :math:`\Phi` are linear layers.
+.. note::
+    The original formulation includes a ReLU inside the maximum operator.
+    This is equivalent to first applying a maximum operator then applying
+    the ReLU.
Parameters
----------
in_feat : int
@@ -38,14 +42,18 @@ class EdgeConv(layers.Layer):
causing silent performance regression. This module will raise a DGLError if it detects
0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
and let the users handle it by themselves. Default: ``False``.
Note
----
Zero in-degree nodes will lead to invalid output value. This is because no message
will be passed to those nodes, and the aggregation function will be applied on empty input.
A common practice to avoid this is to add a self-loop for each node in the graph if
it is homogeneous, which can be achieved by:
>>> g = ... # a DGLGraph
>>> g = dgl.add_self_loop(g)
Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graphs,
since the edge type cannot be decided for self-loop edges. Set ``allow_zero_in_degree``
to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
@@ -66,10 +74,8 @@ class EdgeConv(layers.Layer):
self.bn = layers.BatchNormalization()
def set_allow_zero_in_degree(self, set_value):
r"""
Description
-----------
Set allow_zero_in_degree flag.
r"""Set allow_zero_in_degree flag.
Parameters
----------
set_value : bool
@@ -78,10 +84,8 @@ class EdgeConv(layers.Layer):
self._allow_zero_in_degree = set_value
def call(self, g, feat):
"""
Description
-----------
Forward computation
"""Forward computation
Parameters
----------
g : DGLGraph
@@ -92,10 +96,12 @@ class EdgeConv(layers.Layer):
If a pair of tensors is given, the graph must be a uni-bipartite graph
with only one edge type, and the two tensors must have the same
dimensionality on all except the first axis.
Returns
-------
tf.Tensor or pair of tf.Tensor
New node features.
Raises
------
DGLError
......
@@ -13,12 +13,8 @@ from ..utils import Identity
class GATConv(layers.Layer):
-r"""
-Description
------------
-Apply `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
-over an input signal.
+r"""Graph Attention Layer from `Graph Attention Network
+<https://arxiv.org/pdf/1710.10903.pdf>`__
.. math::
h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i,j} W^{(l)} h_j^{(l)}
@@ -182,11 +178,7 @@ class GATConv(layers.Layer):
self.activation = activation
def set_allow_zero_in_degree(self, set_value):
r"""
Description
-----------
Set allow_zero_in_degree flag.
r"""Set allow_zero_in_degree flag.
Parameters
----------
@@ -196,11 +188,7 @@ class GATConv(layers.Layer):
self._allow_zero_in_degree = set_value
def call(self, graph, feat, get_attention=False):
r"""
Description
-----------
Compute graph attention network layer.
r"""Compute graph attention network layer.
Parameters
----------
......
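Since these hunks touch the TensorFlow modules, a hedged TF-backend sketch (toy graph, hypothetical sizes; assumes DGL is running with the TensorFlow backend):
>>> import dgl
>>> import tensorflow as tf
>>> from dgl.nn.tensorflow import GATConv
>>> g = dgl.graph(([0, 1, 2], [1, 2, 0]))   # a toy 3-node cycle
>>> g = dgl.add_self_loop(g)                # guard against zero in-degrees
>>> feat = tf.random.normal([3, 10])
>>> gatconv = GATConv(10, 2, num_heads=4)
>>> res = gatconv(g, feat)                  # shape (3, 4, 2): nodes x heads x out_feats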
@@ -8,12 +8,8 @@ from ....utils import expand_as_pair
class GINConv(layers.Layer):
-r"""
-Description
------------
-Graph Isomorphism Network layer from paper `How Powerful are Graph
-Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
+r"""Graph Isomorphism Network layer from `How Powerful are Graph
+Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
@@ -80,11 +76,7 @@ class GINConv(layers.Layer):
self.eps = tf.Variable(initial_value=[init_eps], dtype=tf.float32, trainable=learn_eps)
def call(self, graph, feat):
r"""
Description
-----------
Compute Graph Isomorphism Network layer.
r"""Compute Graph Isomorphism Network layer.
Parameters
----------
......
@@ -12,12 +12,10 @@ from ....utils import expand_as_pair
class GraphConv(layers.Layer):
-r"""
+r"""Graph convolution from `Semi-Supervised Classification with Graph Convolutional Networks
+<https://arxiv.org/abs/1609.02907>`__
-Description
------------
-Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
-and mathematically is defined as follows:
+Mathematically it is defined as follows:
.. math::
h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ij}}h_j^{(l)}W^{(l)})
@@ -170,11 +168,7 @@ class GraphConv(layers.Layer):
self._activation = activation
def set_allow_zero_in_degree(self, set_value):
r"""
Description
-----------
Set allow_zero_in_degree flag.
r"""Set allow_zero_in_degree flag.
Parameters
----------
@@ -184,11 +178,7 @@ class GraphConv(layers.Layer):
self._allow_zero_in_degree = set_value
def call(self, graph, feat, weight=None):
r"""
Description
-----------
Compute graph convolution.
r"""Compute graph convolution.
Parameters
----------
......
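A matching hedged sketch for the TF GraphConv (same toy setup as above; default normalization is assumed, not shown in the diff):
>>> import dgl
>>> import tensorflow as tf
>>> from dgl.nn.tensorflow import GraphConv
>>> g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
>>> g = dgl.add_self_loop(g)
>>> feat = tf.ones([4, 5])
>>> conv = GraphConv(5, 2)
>>> res = conv(g, feat)                     # tf.Tensor of shape (4, 2)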
@@ -8,15 +8,10 @@ from .. import utils
class RelGraphConv(layers.Layer):
-r"""
+r"""Relational graph convolution layer from `Modeling Relational Data with Graph
+Convolutional Networks <https://arxiv.org/abs/1703.06103>`__
-Description
------------
-Relational graph convolution layer.
-Relational graph convolution is introduced in "`Modeling Relational Data with Graph
-Convolutional Networks <https://arxiv.org/abs/1703.06103>`__"
-and can be described as below:
+It can be described as below:
.. math::
@@ -248,7 +243,7 @@ class RelGraphConv(layers.Layer):
return {'msg': msg}
def call(self, g, x, etypes, norm=None):
""" Forward computation
"""Forward computation
Parameters
----------
......
@@ -9,12 +9,8 @@ from ....utils import expand_as_pair, check_eq_shape
class SAGEConv(layers.Layer):
-r"""
-Description
------------
-GraphSAGE layer from paper `Inductive Representation Learning on
-Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__.
+r"""GraphSAGE layer from `Inductive Representation Learning on
+Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__
.. math::
h_{\mathcal{N}(i)}^{(l+1)} &= \mathrm{aggregate}
@@ -133,11 +129,7 @@ class SAGEConv(layers.Layer):
return {'neigh': rst}
def call(self, graph, feat):
r"""
Description
-----------
Compute GraphSAGE layer.
r"""Compute GraphSAGE layer.
Parameters
----------
......
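And one for the TF SAGEConv, assuming the usual (in_feats, out_feats, aggregator_type) constructor:
>>> import dgl
>>> import tensorflow as tf
>>> from dgl.nn.tensorflow import SAGEConv
>>> g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
>>> feat = tf.random.normal([4, 10])
>>> conv = SAGEConv(10, 2, 'mean')          # mean neighbor aggregation
>>> res = conv(g, feat)                     # tf.Tensor of shape (4, 2)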
@@ -9,12 +9,8 @@ from ....base import DGLError
class SGConv(layers.Layer):
-r"""
-Description
------------
-Simplifying Graph Convolution layer from paper `Simplifying Graph
-Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__.
+r"""SGC layer from `Simplifying Graph
+Convolutional Networks <https://arxiv.org/pdf/1902.07153.pdf>`__
.. math::
H^{K} = (\tilde{D}^{-1/2} \tilde{A} \tilde{D}^{-1/2})^K X \Theta
@@ -104,11 +100,7 @@ class SGConv(layers.Layer):
self._allow_zero_in_degree = allow_zero_in_degree
def set_allow_zero_in_degree(self, set_value):
r"""
Description
-----------
Set allow_zero_in_degree flag.
r"""Set allow_zero_in_degree flag.
Parameters
----------
@@ -118,11 +110,7 @@ class SGConv(layers.Layer):
self._allow_zero_in_degree = set_value
def call(self, graph, feat):
r"""
Description
-----------
Compute Simplifying Graph Convolution layer.
r"""Compute Simplifying Graph Convolution layer.
Parameters
----------
......
@@ -113,8 +113,8 @@ class MaxPooling(layers.Layer):
class SortPooling(layers.Layer):
-r"""Apply Sort Pooling (`An End-to-End Deep Learning Architecture for Graph Classification
-<https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__) over the nodes in the graph.
+r"""Sort Pooling from `An End-to-End Deep Learning Architecture for Graph Classification
+<https://www.cse.wustl.edu/~ychen/public/DGCNN.pdf>`__
Parameters
----------
@@ -134,7 +134,7 @@ class SortPooling(layers.Layer):
graph : DGLGraph
The graph.
feat : tf.Tensor
-The input feature with shape :math:`(N, D)` where
+The input node feature with shape :math:`(N, D)` where
:math:`N` is the number of nodes in the graph.
Returns
@@ -154,8 +154,8 @@ class SortPooling(layers.Layer):
class GlobalAttentionPooling(layers.Layer):
-r"""Apply Global Attention Pooling (`Gated Graph Sequence Neural Networks
-<https://arxiv.org/abs/1511.05493.pdf>`__) over the nodes in the graph.
+r"""Global Attention Pooling from `Gated Graph Sequence Neural Networks
+<https://arxiv.org/abs/1511.05493>`__
.. math::
r^{(i)} = \sum_{k=1}^{N_i}\mathrm{softmax}\left(f_{gate}
@@ -183,7 +183,7 @@ class GlobalAttentionPooling(layers.Layer):
graph : DGLGraph
The graph.
feat : tf.Tensor
-The input feature with shape :math:`(N, D)` where
+The input node feature with shape :math:`(N, D)` where
:math:`N` is the number of nodes in the graph.
Returns
......
@@ -496,7 +496,7 @@ def sample_neighbors_biased(g, nodes, fanout, bias, edge_dir='in',
Sort the graph (necessary!)
->>> g_sorted = dgl.transform.sort_csr_by_tag(g, tag)
+>>> g_sorted = dgl.transforms.sort_csr_by_tag(g, tag)
>>> g_sorted.ndata['_TAG_OFFSET']
tensor([[0, 1, 2],
[0, 2, 2],
......
@@ -5,7 +5,7 @@ import numpy as np
from ..utils import recursive_apply, recursive_apply_pair
from ..base import EID
from .. import backend as F
-from .. import transform, utils
+from .. import transforms, utils
def _locate_eids_to_exclude(frontier_parent_eids, exclude_eids):
"""Find the edges whose IDs in parent graph appeared in exclude_eids.
@@ -63,7 +63,7 @@ class EidExcluder(object):
# to the mapping from the new graph to the old frontier.
# So we need to test if located_eids is empty, and do the remapping ourselves.
if len(located_eids) > 0:
-frontier = transform.remove_edges(
+frontier = transforms.remove_edges(
frontier, located_eids, store_ids=True)
frontier.edata[EID] = F.gather_row(parent_eids, frontier.edata[EID])
else:
@@ -72,7 +72,7 @@ class EidExcluder(object):
new_eids = parent_eids.copy()
for k, v in located_eids.items():
if len(v) > 0:
-frontier = transform.remove_edges(
+frontier = transforms.remove_edges(
frontier, v, etype=k, store_ids=True)
new_eids[k] = F.gather_row(parent_eids[k], frontier.edges[k].data[EID])
frontier.edata[EID] = new_eids
......