Commit 7191904f authored by daniel-furman

first stab at wrap_chat_template, arc conversation test

parent 59e3b17c
@@ -326,7 +326,6 @@ def evaluate(
         # put responses from model into a list of length K for each request.
         for x, req in zip(resps, cloned_reqs):
-            print(x)
             req.resps.append(x)
         if lm.world_size > 1:
...
@@ -667,13 +667,32 @@ class HFLM(LM):
         """
         Utility for adding chat templates via the apply_chat_template() method
         """
+        import re
+
         new_reqs = []
         for req in requests:
             context, continuation = req.args[0].strip(), req.args[1].strip()
-            chat = [
-                #{"role": "system", "content": "You are a helpful, respectful and honest assistant."},
-                {"role": "user", "content": context},
-            ]
+
+            # Split the flat few-shot context on the "Question: " / "Answer: " delimiters.
+            elements = re.split('Answer: |Question: ', context.replace('\n', ' '))
+
+            new_elements = []
+            for element in elements[1:]:
+                new_elements.append(element.strip())
+
+            # Rebuild the prompt as alternating user/assistant turns.
+            chat = []
+            for i in range(len(new_elements)):
+                if i % 2 == 0:
+                    chat.append({"role": "user", "content": f"Question: {new_elements[i]} Answer:"})
+                else:
+                    chat.append({"role": "assistant", "content": f"{new_elements[i]}"})
+
+            #chat = [
+            #    #{"role": "system", "content": "You are a helpful, respectful and honest assistant."},
+            #    {"role": "user", "content": context},
+            #]
             context = self.tokenizer.apply_chat_template(
                 chat,
                 tokenize=False,
...
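
For context, a minimal standalone sketch of the conversion this hunk introduces: the flat few-shot "Question: ... Answer: ..." context is split apart and rebuilt as alternating user/assistant messages, which is the chat list later passed to the tokenizer's chat template. The context_to_chat helper and the toy ARC-style prompt below are invented for illustration only; a real harness context typically ends with a bare "Answer:" cue, which this toy input omits.

import re


def context_to_chat(context):
    """Rebuild a few-shot 'Question: ... Answer: ...' prompt as a chat
    message list, mirroring the splitting logic added in this commit."""
    # Flatten newlines, then split on the shot delimiters; anything before
    # the first "Question: " (usually an empty string) is discarded.
    elements = re.split('Answer: |Question: ', context.replace('\n', ' '))
    new_elements = [element.strip() for element in elements[1:]]

    chat = []
    for i, element in enumerate(new_elements):
        if i % 2 == 0:
            # Even slots are questions; re-attach the markers the split removed.
            chat.append({"role": "user", "content": f"Question: {element} Answer:"})
        else:
            # Odd slots are the gold answers for the preceding question.
            chat.append({"role": "assistant", "content": element})
    return chat


if __name__ == "__main__":
    # Toy one-shot ARC-style context (illustrative only).
    context = (
        "Question: Which gas do plants absorb from the atmosphere?\n"
        "Answer: carbon dioxide\n"
        "Question: What force pulls objects toward Earth?"
    )
    for message in context_to_chat(context):
        print(message)
    # Expected roles in order: user, assistant, user (the unanswered question).

With tokenize=False, apply_chat_template() returns the model-specific chat-formatted prompt string, which then replaces the original flat context for that request.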