chenpangpang / transformers · Commit e4e55af7 (unverified)
Authored Sep 18, 2023 by Sanchit Gandhi; committed via GitHub on Sep 18, 2023
Parent: 8b5da9fc

[Wav2Vec2-Conf / LLaMA] Style fix (#26188)

* torch.nn -> nn
* fix llama
* copies
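The rename is behavior-neutral: these modeling files already import the alias with `from torch import nn` (visible in the unchanged `nn.Module` base classes elsewhere in this diff), so `nn.Module` and `torch.nn.Module` name the same class, and the commit only makes the spelling consistent. A minimal sketch demonstrating the equivalence:

import torch
from torch import nn

# Both spellings resolve to the very same class objects.
assert nn.Module is torch.nn.Module
assert nn.Conv1d is torch.nn.Conv1d

# After this commit, layer construction reads uniformly:
conv = nn.Conv1d(16, 32, kernel_size=1)  # rather than torch.nn.Conv1d(...)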
Showing 3 changed files with 9 additions and 9 deletions:

  src/transformers/models/deprecated/open_llama/modeling_open_llama.py       +1 -1
  src/transformers/models/llama/modeling_llama.py                            +1 -1
  src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py  +7 -7
src/transformers/models/deprecated/open_llama/modeling_open_llama.py

@@ -99,7 +99,7 @@ class OpenLlamaRMSNorm(nn.Module):
 # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->OpenLlama
-class OpenLlamaRotaryEmbedding(torch.nn.Module):
+class OpenLlamaRotaryEmbedding(nn.Module):
     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
         super().__init__()
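The `# Copied from transformers.models.llama...` marker above is what the "copies" bullet in the commit message refers to: blocks carrying this marker are kept in sync with their source mechanically, so once the LLaMA original is fixed, running the repository's copy-consistency tooling (`make fix-copies`) propagates the same one-line change into this deprecated Open-LLaMA module.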
src/transformers/models/llama/modeling_llama.py

@@ -89,7 +89,7 @@ class LlamaRMSNorm(nn.Module):
         return self.weight * hidden_states.to(input_dtype)


-class LlamaRotaryEmbedding(torch.nn.Module):
+class LlamaRotaryEmbedding(nn.Module):
     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
         super().__init__()
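For context, the class being restyled computes rotary position embeddings. A minimal sketch of the core math behind that constructor signature (the name RotaryEmbeddingSketch and the simplified forward are illustrative only; the actual transformers implementation additionally caches the cos/sin tables):

import torch
from torch import nn

class RotaryEmbeddingSketch(nn.Module):  # hypothetical name, for illustration
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        # One inverse frequency per pair of channels: theta_i = base^(-2i/dim)
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, device=device).float() / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, seq_len):
        # Rotation angle for position t and frequency i is t * theta_i
        t = torch.arange(seq_len, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)  # (seq_len, dim)
        return emb.cos(), emb.sin()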
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py

@@ -584,7 +584,7 @@ class Wav2Vec2ConformerConvolutionModule(nn.Module):
         if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
             raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding")
         self.layer_norm = nn.LayerNorm(config.hidden_size)
-        self.pointwise_conv1 = torch.nn.Conv1d(
+        self.pointwise_conv1 = nn.Conv1d(
             config.hidden_size,
             2 * config.hidden_size,
             kernel_size=1,
@@ -592,8 +592,8 @@ class Wav2Vec2ConformerConvolutionModule(nn.Module):
             padding=0,
             bias=False,
         )
-        self.glu = torch.nn.GLU(dim=1)
-        self.depthwise_conv = torch.nn.Conv1d(
+        self.glu = nn.GLU(dim=1)
+        self.depthwise_conv = nn.Conv1d(
             config.hidden_size,
             config.hidden_size,
             config.conv_depthwise_kernel_size,
@@ -602,9 +602,9 @@ class Wav2Vec2ConformerConvolutionModule(nn.Module):
             groups=config.hidden_size,
             bias=False,
         )
-        self.batch_norm = torch.nn.BatchNorm1d(config.hidden_size)
+        self.batch_norm = nn.BatchNorm1d(config.hidden_size)
         self.activation = ACT2FN[config.hidden_act]
-        self.pointwise_conv2 = torch.nn.Conv1d(
+        self.pointwise_conv2 = nn.Conv1d(
             config.hidden_size,
             config.hidden_size,
             kernel_size=1,
@@ -612,7 +612,7 @@ class Wav2Vec2ConformerConvolutionModule(nn.Module):
             padding=0,
             bias=False,
         )
-        self.dropout = torch.nn.Dropout(config.conformer_conv_dropout)
+        self.dropout = nn.Dropout(config.conformer_conv_dropout)

     def forward(self, hidden_states):
         hidden_states = self.layer_norm(hidden_states)
@@ -798,7 +798,7 @@ class Wav2Vec2ConformerEncoderLayer(nn.Module):
         # Self-Attention
         self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
-        self.self_attn_dropout = torch.nn.Dropout(dropout)
+        self.self_attn_dropout = nn.Dropout(dropout)
         self.self_attn = Wav2Vec2ConformerSelfAttention(config)

         # Conformer Convolution
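The hunks above touch every layer of the Conformer convolution module, so it helps to see how those layers compose. A minimal sketch of the forward flow (the name ConformerConvSketch, the fixed nn.SiLU standing in for ACT2FN[config.hidden_act], and the omission of attention-mask handling are all simplifications, not the transformers implementation). Note also why the first hunk's ValueError demands an odd kernel: only then does (kernel_size - 1) // 2 of padding give exact 'SAME' output length.

import torch
from torch import nn

class ConformerConvSketch(nn.Module):  # hypothetical name, for illustration
    def __init__(self, hidden_size=256, kernel_size=31, dropout=0.1):
        super().__init__()
        self.layer_norm = nn.LayerNorm(hidden_size)
        self.pointwise_conv1 = nn.Conv1d(hidden_size, 2 * hidden_size, kernel_size=1, bias=False)
        self.glu = nn.GLU(dim=1)  # halves the channels back to hidden_size
        self.depthwise_conv = nn.Conv1d(
            hidden_size, hidden_size, kernel_size,
            padding=(kernel_size - 1) // 2,  # 'SAME' padding; requires odd kernel
            groups=hidden_size, bias=False,
        )
        self.batch_norm = nn.BatchNorm1d(hidden_size)
        self.activation = nn.SiLU()  # stands in for ACT2FN[config.hidden_act]
        self.pointwise_conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states):                  # (batch, time, hidden)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)  # Conv1d wants (batch, channel, time)
        hidden_states = self.glu(self.pointwise_conv1(hidden_states))
        hidden_states = self.activation(self.batch_norm(self.depthwise_conv(hidden_states)))
        hidden_states = self.dropout(self.pointwise_conv2(hidden_states))
        return hidden_states.transpose(1, 2)           # back to (batch, time, hidden)

module = ConformerConvSketch()
out = module(torch.randn(2, 50, 256))  # -> shape (2, 50, 256)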