gaoqiong / lm-evaluation-harness · Commits

Commit c2aaa501, authored Sep 07, 2020 by Jason Phang

    combine gpt2

Parent: 2d4b3a8c
Showing 2 changed files with 18 additions and 32 deletions:

    gpt2.py          +0  -28
    models/gpt2.py   +18 -4
gpt2.py (deleted, 100644 → 0)
```python
import transformers
from base import LM
import torch
import torch.nn.functional as F


class GPT2LM(LM):
    def __init__(self, dev='cpu'):
        self.gpt2 = transformers.GPT2LMHeadModel.from_pretrained('gpt2').to(dev)
        self.tok = transformers.GPT2Tokenizer.from_pretrained('gpt2')
        self.dev = dev

    def generate(self, context, until):
        context = torch.tensor([self.tok.encode(context.strip())], dtype=torch.long).to(self.dev)
        res = self.gpt2.generate(
            context,
            eos_token_id=self.tok.encoder[until],
            do_sample=False,
            max_length=1024,
        )
        # chop off the prompt and the final eos token
        return self.tok.decode(res[0][len(context[0]):-1]).strip()

    def loglikelihood(self, context, continuation):
        print('likelihood:', context, continuation)
        inp = torch.tensor([self.tok.encode(context + continuation)], dtype=torch.long).to(self.dev)
        ctxlen = len(self.tok.encode(context.strip()))

        cont_toks = inp[:, ctxlen:]  # [batch, seq]
        logits = F.log_softmax(self.gpt2(inp)[0], dim=-1)[:, ctxlen - 1:-1]  # [batch, seq, vocab]

        return torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(-1)
```
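The slicing in `loglikelihood` is the subtle part of this file: a causal LM's logits at position i are the distribution over token i+1, so the continuation tokens `inp[:, ctxlen:]` line up with `logits[:, ctxlen - 1:-1]`. A minimal sketch of that alignment with toy tensors (no model is loaded; the shapes and values here are illustrative only, not from the repo):

```python
import torch
import torch.nn.functional as F

batch, seq, vocab = 1, 6, 10
ctxlen = 3  # pretend the first 3 tokens are the context

inp = torch.randint(vocab, (batch, seq))     # [batch, seq], stand-in for encoded context+continuation
raw_logits = torch.randn(batch, seq, vocab)  # stand-in for self.gpt2(inp)[0]

cont_toks = inp[:, ctxlen:]                  # [batch, seq - ctxlen]
# shift left by one: logits at position i score the token at position i + 1
logits = F.log_softmax(raw_logits, dim=-1)[:, ctxlen - 1:-1]

# per-token log-probabilities of the continuation, one per continuation token
per_tok = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(-1)
assert per_tok.shape == (batch, seq - ctxlen)
```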
models/gpt2.py
```diff
 import transformers
 import torch
+import torch.nn.functional as F

 from ..base import LM
+from .. import utils
 from . import MODEL_REGISTRY


 @MODEL_REGISTRY.register("gpt2")
 class GPT2LM(LM):
-    def __init__(self):
+    def __init__(self, device="cpu"):
         self.gpt2 = transformers.GPT2LMHeadModel.from_pretrained('gpt2')
         self.tokenizer = transformers.GPT2Tokenizer.from_pretrained('gpt2')
+        self.device = device
+
+    @classmethod
+    def create_from_args(cls, arg_string):
+        args = utils.simple_parse_args_string(arg_string)
+        return cls(device=args.get("device", "cpu"))

     def generate(self, context, max_gen_length):
-        context = torch.tensor([self.tokenizer.encode(context.strip())], dtype=torch.long)
+        context = torch.tensor([self.tok.encode(context.strip())], dtype=torch.long).to(self.device)
         res = self.gpt2.generate(
             context,
             eos_token_id=self.tokenizer.eos_token_id,
@@ -23,4 +31,10 @@ class GPT2LM(LM):
         return self.tok.decode(res[0][len(context[0]):-1]).strip()

     def loglikelihood(self, context, continuation):
-        pass
+        inp = torch.tensor([self.tok.encode(context + continuation)], dtype=torch.long).to(self.device)
+        ctxlen = len(self.tok.encode(context.strip()))
+
+        cont_toks = inp[:, ctxlen:]  # [batch, seq]
+        logits = F.log_softmax(self.gpt2(inp)[0], dim=-1)[:, ctxlen - 1:-1]  # [batch, seq, vocab]
+
+        return torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(-1)
```
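The new `create_from_args` hook depends on `utils.simple_parse_args_string`, which is not part of this diff. A minimal sketch of what such a helper presumably does, assuming a comma-separated `key=value` argument format; the body below is a guess for illustration, not the repo's actual implementation:

```python
# Hypothetical stand-in for utils.simple_parse_args_string (not shown in
# this commit). Assumed input format: comma-separated key=value pairs,
# e.g. "device=cuda:0".
def simple_parse_args_string(arg_string):
    args = {}
    for pair in arg_string.split(","):
        if not pair.strip():
            continue  # tolerate empty strings and trailing commas
        key, value = pair.split("=", 1)
        args[key.strip()] = value.strip()
    return args

# Under that assumption:
#   GPT2LM.create_from_args("device=cuda:0")  ->  GPT2LM(device="cuda:0")
#   GPT2LM.create_from_args("")               ->  GPT2LM(device="cpu")
```

One detail worth noting from the diff itself: the combined file stores the tokenizer as `self.tokenizer` in `__init__`, while the merged method bodies reference `self.tok`, so as committed, `generate` and `loglikelihood` would raise `AttributeError` at runtime.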