Commit 43dee065 authored by daniel-furman

llama test

parent 2e27053d
@@ -689,7 +689,9 @@ class HFLM(LM):
         """
         new_reqs = []
         for req in requests:
-            context, continuation = req.args[0].strip(), req.args[1]
+            context = req.args[0].strip()
+            system_prompt = "You are a helpful assistant."
             # arc experiment with few-shot formatting
             import re
             elements = re.split('Answer:|Question:', context.replace('\n', ' '))
@@ -697,7 +699,7 @@ class HFLM(LM):
             for element in elements[1:-1]:
                 new_elements.append(element.strip())
             new_elements
-            chat = [{"role": "system", "content": f"You are a helpful and concise assistant."}]
+            chat = [{"role": "system", "content": system_prompt}]
             for i in range(len(new_elements)):
                 if i % 2 == 0:
                     chat.append({"role": "user", "content": f"Question: {new_elements[i]} Answer:"})
@@ -708,7 +710,7 @@ class HFLM(LM):
                 tokenize=False,
                 add_generation_prompt=True,
             )
-            req.args = (context, continuation)
+            req.args = (context, req.args[1])
             new_reqs.append(req)
         return new_reqs
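
For reference, the transformation this commit applies to each request can be reproduced outside the harness. The snippet below is a minimal, self-contained sketch, not the harness code itself: the helper name fewshot_to_chat_prompt and the model checkpoint are placeholders, and the assistant branch of the loop (not visible in the truncated hunks above) is inferred from the alternating Question/Answer structure of the few-shot context.

# Minimal sketch of the few-shot-to-chat conversion, assuming a tokenizer
# that ships a chat template (e.g. a Llama-2 chat checkpoint). The helper
# name and checkpoint below are placeholders, not part of this commit.
import re

from transformers import AutoTokenizer


def fewshot_to_chat_prompt(context, tokenizer, system_prompt="You are a helpful assistant."):
    # Flatten newlines, then split the few-shot context on its
    # "Question:" / "Answer:" markers.
    elements = re.split("Answer:|Question:", context.replace("\n", " "))
    # Drop the empty piece before the first marker and the empty piece
    # after the trailing "Answer:", stripping whitespace from the rest.
    new_elements = [element.strip() for element in elements[1:-1]]
    # Even indices are questions (user turns), odd indices are answers
    # (assistant turns); the context ends with a pending "Answer:", so
    # the final element is an unanswered question and the chat ends on
    # a user turn.
    chat = [{"role": "system", "content": system_prompt}]
    for i, element in enumerate(new_elements):
        if i % 2 == 0:
            chat.append({"role": "user", "content": f"Question: {element} Answer:"})
        else:
            chat.append({"role": "assistant", "content": element})
    # Render the conversation to a single prompt string, appending the
    # assistant header so the model's continuation reads as an answer.
    return tokenizer.apply_chat_template(
        chat, tokenize=False, add_generation_prompt=True
    )


tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
context = "Question: What color is the sky?\nAnswer: Blue.\nQuestion: What is 2 + 2?\nAnswer:"
print(fewshot_to_chat_prompt(context, tokenizer))

Note that req.args[1] (the continuation being scored) is passed through unchanged; only the context half of each loglikelihood request is rewritten into the chat format.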