Commit 3ee4c2e2 authored by Matt Hoffner's avatar Matt Hoffner
Browse files

rename to LlamaCppLM

parent 8f992eb3
......@@ -16,7 +16,7 @@ MODEL_REGISTRY = {
"anthropic": anthropic_llms.AnthropicLM,
"textsynth": textsynth.TextSynthLM,
"dummy": dummy.DummyLM,
"llama": llama.LlamaLM
"llama": llama.LlamaCppLM
}
......
......@@ -18,7 +18,7 @@ def llama_completion(base_url, prompt, **kwargs):
print(f"RequestException: {e}")
return None
class LlamaLM(BaseLM):
class LlamaCppLM(BaseLM):
def __init__(self, base_url, truncate=False):
super().__init__()
self.base_url = base_url
......
import unittest
from unittest.mock import MagicMock
from lm_eval.models.llama import LlamaLM
from lm_eval.models.llama import LlamaCppLM
class LlamaLMTest(unittest.TestCase):
class LlamaCppLMTest(unittest.TestCase):
def test_loglikelihood(self):
base_url = "https://matthoffner-ggml-llm-api.hf.space"
lm = LlamaLM(base_url)
lm = LlamaCppLM(base_url)
# Create a MagicMock object to mock llama_completion
llama_completion_mock = MagicMock()
......@@ -29,7 +29,7 @@ class LlamaLMTest(unittest.TestCase):
def test_greedy_until(self):
base_url = "https://matthoffner-ggml-llm-api.hf.space"
lm = LlamaLM(base_url)
lm = LlamaCppLM(base_url)
# Define the llama_completion method with the desired behavior
def llama_completion_mock(url, context, stop=None):
......@@ -48,8 +48,5 @@ class LlamaLMTest(unittest.TestCase):
expected_res = ["generated_text1", "generated_text2"]
self.assertEqual(res, expected_res)
if __name__ == "__main__":
unittest.main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment