GLM-4V_pytorch (ModelZoo), commit e0b10e40
Authored Jul 22, 2024 by wanglch

Initial commit

Parent: ce623fe2
Showing 1 changed file with 4 additions and 2 deletions.

visual.py (+4, -2)
@@ -6,6 +6,8 @@ from transformers.activations import ACT2FN
 import math
 from torch.nn import LayerNorm
 
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
 def standard_attention(query_layer, key_layer, value_layer, scaling_attention_score=True):
     if scaling_attention_score:
         query_layer = query_layer / math.sqrt(query_layer.shape[-1])
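The addition in this hunk is the module-level device pick: the vision tower uses CUDA when available and falls back to CPU otherwise, and the later hunks move tensors onto it. For context, here is a minimal sketch of how the scaling line shown above fits into scaled dot-product attention; the hunk truncates before the score and softmax steps, so the shapes and the continuation below are assumptions for illustration, not code from this file:

    import math
    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Hypothetical (batch, heads, seq_len, head_dim) shapes, for illustration only.
    q = torch.randn(1, 8, 16, 64, device=device)
    k = torch.randn(1, 8, 16, 64, device=device)
    v = torch.randn(1, 8, 16, 64, device=device)

    q = q / math.sqrt(q.shape[-1])                           # the scaling the diff shows
    scores = torch.softmax(q @ k.transpose(-1, -2), dim=-1)  # assumed continuation
    out = scores @ v                                         # (1, 8, 16, 64)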
@@ -39,7 +41,7 @@ class PatchEmbedding(nn.Module):
         self.position_embedding = nn.Embedding(config.num_positions, config.hidden_size)
 
     def forward(self, images: "tensor(B, C, H, W)") -> "tensor(B, L, D)":
-        x = self.proj(images)
+        x = self.proj(images).to(device)
         x = x.flatten(2).transpose(1, 2)
         cls_token = self.cls_embedding.expand(x.shape[0], -1, -1)
         x = torch.cat((cls_token, x), dim=1)
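The functional change here is the trailing .to(device): the patch projection's output is moved to the selected device before the rest of forward runs. A shape walk-through of this forward pass under hypothetical sizes (224x224 RGB input, 14x14 patches, hidden size 1792; none of these are read from the diff):

    import torch
    from torch import nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Stand-ins for self.proj and self.cls_embedding, with made-up sizes.
    proj = nn.Conv2d(3, 1792, kernel_size=14, stride=14)
    cls_embedding = nn.Parameter(torch.zeros(1, 1792))

    images = torch.randn(2, 3, 224, 224)                   # (B, C, H, W)
    x = proj(images).to(device)                            # (2, 1792, 16, 16), now on `device`
    x = x.flatten(2).transpose(1, 2)                       # (2, 256, 1792): 16*16 patch tokens
    cls_token = cls_embedding.to(device).expand(x.shape[0], -1, -1)  # (2, 1, 1792)
    x = torch.cat((cls_token, x), dim=1)                   # (2, 257, 1792), i.e. tensor(B, L, D)
    print(x.shape)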
@@ -66,7 +68,7 @@ class Attention(nn.Module):
         out = attention_fn_default(q, k, v)
-        output = self.dense(out.transpose(1, 2).view(B, L, -1))
+        output = self.dense(out.transpose(1, 2).reshape(B, L, -1))
         output = self.output_dropout(output)
         return output
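The view -> reshape swap is a correctness fix: transpose(1, 2) returns a non-contiguous tensor, and Tensor.view requires a memory layout compatible with the requested shape, so the old line raises a RuntimeError for typical multi-head shapes; Tensor.reshape returns a view when it can and silently copies when it cannot. A small self-contained demonstration (shapes are illustrative):

    import torch

    B, H, L, D = 2, 8, 16, 64
    out = torch.randn(B, H, L, D)        # (batch, heads, seq_len, head_dim)
    t = out.transpose(1, 2)              # (B, L, H, D), non-contiguous

    try:
        t.view(B, L, -1)                 # the old code path
    except RuntimeError as e:
        print("view fails:", e)          # "view size is not compatible ..."

    merged = t.reshape(B, L, -1)         # the new code path: copies when needed
    print(merged.shape)                  # torch.Size([2, 16, 512])

An equivalent fix would be out.transpose(1, 2).contiguous().view(B, L, -1); reshape expresses the same intent more compactly.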