gaoqiong / lm-evaluation-harness · Commits

Commit 34b32f77, authored Jan 09, 2024 by daniel-furman
parent 6c68fd16

first stab at wrap_chat_template, various
Showing 1 changed file with 14 additions and 13 deletions.

lm_eval/models/huggingface.py (+14 / -13)
@@ -671,27 +671,19 @@ class HFLM(LM):
         for req in requests:
             context, continuation = req.args[0].strip(), req.args[1].strip()
             chat = [
-                {"role": "user", "content": context},
-                {"role": "assistant", "content": continuation},
+                # {"role": "system", "content": "You are a helpful, respectful and honest assistant."},
+                {"role": "user", "content": context},
             ]
-            single_tokenized_conversation = self.tokenizer.apply_chat_template(
+            context = self.tokenizer.apply_chat_template(
                 chat,
                 tokenize=False,
                 add_generation_prompt=True,
             )
-            rfind_continuation = single_tokenized_conversation.rfind(continuation)
-            context = single_tokenized_conversation[:rfind_continuation]
-            continuation = single_tokenized_conversation[rfind_continuation:]
-            # remove special chars from continuation
-            continuation = self.tokenizer.decode(
-                self.tokenizer.encode(continuation), skip_special_tokens=True
-            )
             req.args = (context, continuation)
             new_reqs.append(req)
         return new_reqs
 
     def _model_call(self, inps, attn_mask=None, labels=None):
         """
@@ -773,9 +765,12 @@ class HFLM(LM):
         return context_enc, continuation_enc
 
     def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
+        print("Loglikelihood invoked")
         print(f"First element before prompt formatting...\n{requests[0].args}")
         requests = self.tok_wrap_chat_template(requests)
         print(f"First element after prompt formatting...\n{requests[0].args}")
         new_reqs = []
         for context, continuation in [req.args for req in requests]:
             if context == "":
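The (context, continuation) pairs iterated here are why the rfind splitting was dropped in the first hunk: once only the context is templated, there is nothing to split back apart. A rough sketch of the old behaviour, with invented template markers, shows what the removed code was doing and why it then had to scrub special tokens:

    # The old path rendered user + assistant turns into one string, then cut it
    # at the last occurrence of the continuation. Markers here are invented.
    single_tokenized_conversation = (
        "<|user|>\nWhat is the capital of France?\n<|assistant|>\nParis</s>"
    )
    continuation = "Paris"

    rfind_continuation = single_tokenized_conversation.rfind(continuation)
    context = single_tokenized_conversation[:rfind_continuation]
    continuation = single_tokenized_conversation[rfind_continuation:]

    print(repr(context))       # '<|user|>\nWhat is the capital of France?\n<|assistant|>\n'
    print(repr(continuation))  # 'Paris</s>' -- the stray '</s>' is what the old
                               # decode(encode(...), skip_special_tokens=True) removed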
@@ -1049,6 +1044,12 @@ class HFLM(LM):
         return re_ord.get_original(res)
 
     def generate_until(self, requests: List[Instance]) -> List[str]:
+        print("Generate_until invoked")
+        print(f"First element before prompt formatting...\n{requests[0].args}")
+        requests = self.tok_wrap_chat_template(requests)
+        print(f"First element after prompt formatting...\n{requests[0].args}")
         res = []
 
         def _collate(x):
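Both entry points now funnel requests through tok_wrap_chat_template before doing anything else. A self-contained sketch of that rewriting step, with a stand-in for lm_eval's Instance and an invented template string in place of the tokenizer call, shows the in-place mutation of req.args:

    from dataclasses import dataclass

    # Minimal stand-in for lm_eval's Instance: just enough to carry .args.
    @dataclass
    class FakeInstance:
        args: tuple

    def fake_wrap_chat_template(requests, template="<|user|>\n{}\n<|assistant|>\n"):
        # Stand-in for the real method, which renders the tokenizer's own
        # chat template; this format string is invented for illustration.
        new_reqs = []
        for req in requests:
            context, continuation = req.args[0].strip(), req.args[1].strip()
            req.args = (template.format(context), continuation)
            new_reqs.append(req)
        return new_reqs

    reqs = [FakeInstance(args=("Translate 'chat' from French.", " cat"))]
    reqs = fake_wrap_chat_template(reqs)
    print(reqs[0].args)
    # ("<|user|>\nTranslate 'chat' from French.\n<|assistant|>\n", 'cat')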