Commit b6c75ed1 authored by daniel-furman

llama test

parent 021232be
@@ -670,32 +670,6 @@ class HFLM(LM):
         new_reqs = []
         for req in requests:
             context, continuation = req.args[0].strip(), req.args[1].strip()
-            # arc experiment with few-shot formatting
-            """import re
-            elements = re.split('Answer:|Question:', context.replace('\n', ' '))
-            new_elements = []
-            for element in elements[1:-1]:
-                new_elements.append(element.strip())
-            new_elements
-            chat = []
-            for i in range(len(new_elements)):
-                if i % 2 == 0:
-                    chat.append({"role": "user", "content": f"Question: {new_elements[i]} Answer:"})
-                else:
-                    chat.append({"role": "assistant", "content": f"{new_elements[i]}"})"""
-            chat = [
-                {"role": "system", "content": "You are a helpful, respectful and honest assistant."},
-                {"role": "user", "content": context},
-                {"role": "assistant", "content": continuation},
-            ]
-            context = self.tokenizer.apply_chat_template(
-                chat,
-                tokenize=False,
-                add_generation_prompt=True,
-            )
-            print(context)
             chat = [
                 {"role": "system", "content": "You are a helpful, respectful and honest assistant."},
                 {"role": "user", "content": context},
@@ -705,8 +679,6 @@ class HFLM(LM):
                 tokenize=False,
                 add_generation_prompt=True,
             )
-            print(context)
-            print("\n")
             req.args = (context, continuation)
             new_reqs.append(req)
...
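For reference, the code path this commit keeps wraps each request's context in a chat template before scoring, while the deleted docstring block was an earlier ARC experiment that split few-shot prompts into alternating user/assistant turns; the debug prints are dropped as well. Below is a minimal standalone sketch of the surviving chat-template call. The model name and example question are illustrative assumptions, not taken from this commit.

```python
# Minimal sketch of the chat-template wrapping kept by this commit
# (model name and example request are illustrative assumptions).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

context = "Question: Which gas do plants absorb from the air? Answer:"
chat = [
    {"role": "system", "content": "You are a helpful, respectful and honest assistant."},
    {"role": "user", "content": context},
]

# tokenize=False returns the rendered prompt string rather than token ids;
# add_generation_prompt=True appends the assistant header so the
# continuation is scored as the model's own reply.
context = tokenizer.apply_chat_template(
    chat,
    tokenize=False,
    add_generation_prompt=True,
)
print(context)
```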