Commit 62d39a23 authored by Victor Nova

add device and dtype parameters to StableEmbedding

parent 1efb87d8
@@ -38,6 +38,8 @@ class StableEmbedding(torch.nn.Embedding):
         scale_grad_by_freq: bool = False,
         sparse: bool = False,
         _weight: Optional[Tensor] = None,
+        device=None,
+        dtype=None,
     ) -> None:
         super(StableEmbedding, self).__init__(
             num_embeddings,
@@ -48,8 +50,10 @@ class StableEmbedding(torch.nn.Embedding):
             scale_grad_by_freq,
             sparse,
             _weight,
+            device,
+            dtype,
         )
-        self.norm = torch.nn.LayerNorm(embedding_dim)
+        self.norm = torch.nn.LayerNorm(embedding_dim, device=device)
         GlobalOptimManager.get_instance().register_module_override(
             self, "weight", {"optim_bits": 32}
         )
@@ -81,7 +85,10 @@ class StableEmbedding(torch.nn.Embedding):
             self.sparse,
         )
 
-        return self.norm(emb)
+        # always apply layer norm in full precision
+        emb = emb.to(torch.get_default_dtype())
+
+        return self.norm(emb).to(self.weight.dtype)
 
 
 class Embedding(torch.nn.Embedding):
...
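
A quick usage sketch of the new parameters (the bnb.nn import path, sizes, device, and dtype below are illustrative assumptions, not part of this commit): device and dtype are forwarded to torch.nn.Embedding, so the weight can be allocated directly on the target device in a reduced-precision dtype, while forward still runs the LayerNorm in the default (full) precision and casts the result back to the weight dtype.

# Usage sketch (assumes StableEmbedding is exposed as bnb.nn.StableEmbedding;
# the vocabulary size, device, and dtype here are illustrative).
import torch
import bitsandbytes as bnb

emb = bnb.nn.StableEmbedding(
    num_embeddings=30522,
    embedding_dim=768,
    device="cuda",        # weight and LayerNorm are created directly on the GPU
    dtype=torch.float16,  # embedding weight stored in half precision
)

tokens = torch.randint(0, 30522, (4, 128), device="cuda")
out = emb(tokens)         # LayerNorm runs in torch.get_default_dtype(),
                          # then the result is cast back to the weight dtype
print(out.dtype)          # torch.float16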