Commit 3ee4c2e2 authored by Matt Hoffner's avatar Matt Hoffner
Browse files

rename to LlamaCppLM

parent 8f992eb3
...@@ -16,7 +16,7 @@ MODEL_REGISTRY = { ...@@ -16,7 +16,7 @@ MODEL_REGISTRY = {
"anthropic": anthropic_llms.AnthropicLM, "anthropic": anthropic_llms.AnthropicLM,
"textsynth": textsynth.TextSynthLM, "textsynth": textsynth.TextSynthLM,
"dummy": dummy.DummyLM, "dummy": dummy.DummyLM,
"llama": llama.LlamaLM "llama": llama.LlamaCppLM
} }
......
...@@ -18,7 +18,7 @@ def llama_completion(base_url, prompt, **kwargs): ...@@ -18,7 +18,7 @@ def llama_completion(base_url, prompt, **kwargs):
print(f"RequestException: {e}") print(f"RequestException: {e}")
return None return None
class LlamaLM(BaseLM): class LlamaCppLM(BaseLM):
def __init__(self, base_url, truncate=False): def __init__(self, base_url, truncate=False):
super().__init__() super().__init__()
self.base_url = base_url self.base_url = base_url
......
import unittest import unittest
from unittest.mock import MagicMock from unittest.mock import MagicMock
from lm_eval.models.llama import LlamaLM from lm_eval.models.llama import LlamaCppLM
class LlamaLMTest(unittest.TestCase): class LlamaCppLMTest(unittest.TestCase):
def test_loglikelihood(self): def test_loglikelihood(self):
base_url = "https://matthoffner-ggml-llm-api.hf.space" base_url = "https://matthoffner-ggml-llm-api.hf.space"
lm = LlamaLM(base_url) lm = LlamaCppLM(base_url)
# Create a MagicMock object to mock llama_completion # Create a MagicMock object to mock llama_completion
llama_completion_mock = MagicMock() llama_completion_mock = MagicMock()
...@@ -29,7 +29,7 @@ class LlamaLMTest(unittest.TestCase): ...@@ -29,7 +29,7 @@ class LlamaLMTest(unittest.TestCase):
def test_greedy_until(self): def test_greedy_until(self):
base_url = "https://matthoffner-ggml-llm-api.hf.space" base_url = "https://matthoffner-ggml-llm-api.hf.space"
lm = LlamaLM(base_url) lm = LlamaCppLM(base_url)
# Define the llama_completion method with the desired behavior # Define the llama_completion method with the desired behavior
def llama_completion_mock(url, context, stop=None): def llama_completion_mock(url, context, stop=None):
...@@ -48,8 +48,5 @@ class LlamaLMTest(unittest.TestCase): ...@@ -48,8 +48,5 @@ class LlamaLMTest(unittest.TestCase):
expected_res = ["generated_text1", "generated_text2"] expected_res = ["generated_text1", "generated_text2"]
self.assertEqual(res, expected_res) self.assertEqual(res, expected_res)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment