"examples/pytorch/git@developer.sourcefind.cn:OpenDAS/dgl.git" did not exist on "7bad9178baa2742d633b030c44b3ea5db626942e"
Commit 886ef6bc authored by zhiqiang's avatar zhiqiang Committed by Facebook Github Bot
Browse files

Remove duplicate code (#754)

Summary:
Remove duplicate definition of PositionalEmbedding in `lightconv.py`
Pull Request resolved: https://github.com/pytorch/fairseq/pull/754

Differential Revision: D15451443

Pulled By: myleott

fbshipit-source-id: a3d82ab2c1335d66be3c5d67a07893162d138c7a
parent 4604b4a5
...@@ -23,10 +23,9 @@ from fairseq.modules import ( ...@@ -23,10 +23,9 @@ from fairseq.modules import (
AdaptiveSoftmax, AdaptiveSoftmax,
DynamicConv1dTBC, DynamicConv1dTBC,
LayerNorm, LayerNorm,
LearnedPositionalEmbedding, PositionalEmbedding,
LightweightConv1dTBC, LightweightConv1dTBC,
MultiheadAttention, MultiheadAttention,
SinusoidalPositionalEmbedding,
) )
...@@ -665,20 +664,6 @@ def Linear(in_features, out_features, bias=True): ...@@ -665,20 +664,6 @@ def Linear(in_features, out_features, bias=True):
return m return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, learned=False):
    """Build a positional-embedding module.

    Returns a ``SinusoidalPositionalEmbedding`` by default, or a
    ``LearnedPositionalEmbedding`` when ``learned`` is True.  Either way
    the table is sized ``num_embeddings + padding_idx + 1`` so positions
    offset past the padding index still have a slot.
    """
    table_size = num_embeddings + padding_idx + 1
    if not learned:
        # Fixed (non-trainable) sinusoidal encodings.
        return SinusoidalPositionalEmbedding(
            embedding_dim, padding_idx, init_size=table_size,
        )
    # Trainable embeddings: normal init, with the padding row zeroed out.
    emb = LearnedPositionalEmbedding(table_size, embedding_dim, padding_idx)
    nn.init.normal_(emb.weight, mean=0, std=embedding_dim ** -0.5)
    nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
@register_model_architecture('lightconv', 'lightconv') @register_model_architecture('lightconv', 'lightconv')
def base_architecture(args): def base_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment