"test/srt/models/lora/test_lora.py" did not exist on "712216928fa252d6592a1518579018a69cb72bfe"
Unverified Commit 7882043b authored by Kiersten Stokes's avatar Kiersten Stokes Committed by GitHub
Browse files

Fix package extras for watsonx support (#2426)



* Update pyproject.toml with watsonx package extra
Signed-off-by: kiersten-stokes <kierstenstokes@gmail.com>

* Remove unused function
Signed-off-by: kiersten-stokes <kierstenstokes@gmail.com>

---------
Signed-off-by: kiersten-stokes <kierstenstokes@gmail.com>
parent 1185e89a
......@@ -158,7 +158,7 @@ class WatsonxLLM(LM):
project_id = watsonx_credentials.get("project_id", None)
deployment_id = watsonx_credentials.get("deployment_id", None)
client.set.default_project(project_id)
self.generate_params = generate_params
self.generate_params = generate_params or {}
self.model = ModelInference(
model_id=model_id,
deployment_id=deployment_id,
......@@ -167,12 +167,6 @@ class WatsonxLLM(LM):
)
self._model_id = model_id
def dump_parameters(self):
    """Return the model's generation parameters in a serializable form.

    Delegates to ``self._parameters.model_dump()`` — a pydantic-style
    serialization call, so ``self._parameters`` is presumably a pydantic
    model holding the watsonx generation parameters (TODO confirm:
    ``_parameters`` is assigned elsewhere in the class and is not
    visible in this fragment).

    Returns:
        A plain-``dict`` representation of the parameters, suitable for
        JSON serialization or logging.
    """
    # NOTE(review): this method is unused per the commit message
    # ("Remove unused function") and is deleted by this diff.
    return self._parameters.model_dump()
@staticmethod
def _has_stop_token(response_tokens: List[str], context_tokens: List[str]) -> bool:
"""
......
......@@ -62,6 +62,7 @@ dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy"]
deepsparse = ["deepsparse-nightly[llm]>=1.8.0.20240404"]
gptq = ["auto-gptq[triton]>=0.6.0"]
hf_transfer = ["hf_transfer"]
ibm_watsonx_ai = ["ibm_watsonx_ai"]
ifeval = ["langdetect", "immutabledict", "nltk>=3.9.1"]
neuronx = ["optimum[neuronx]"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2"]
......@@ -81,6 +82,7 @@ all = [
"lm_eval[deepsparse]",
"lm_eval[gptq]",
"lm_eval[hf_transfer]",
"lm_eval[ibm_watsonx_ai]",
"lm_eval[ifeval]",
"lm_eval[mamba]",
"lm_eval[math]",
......@@ -93,7 +95,6 @@ all = [
"lm_eval[vllm]",
"lm_eval[zeno]",
"lm_eval[wandb]",
"lm_eval[ibm_watsonx_ai]"
]
[tool.ruff.lint]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment