Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
lm-evaluation-harness
Commits
3b7dbef9
Unverified
Commit
3b7dbef9
authored
Mar 14, 2025
by
daniel-salib
Committed by
GitHub
Mar 14, 2025
Browse files
use verify_certificate flag in batch requests (#2785)
parent
91264653
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
81 additions
and
2 deletions
+81
-2
lm_eval/models/api_models.py
lm_eval/models/api_models.py
+1
-1
tests/models/test_api.py
tests/models/test_api.py
+80
-1
No files found.
lm_eval/models/api_models.py
View file @
3b7dbef9
...
@@ -475,7 +475,7 @@ class TemplateAPI(TemplateLM):
...
@@ -475,7 +475,7 @@ class TemplateAPI(TemplateLM):
**
kwargs
,
**
kwargs
,
)
->
Union
[
List
[
List
[
str
]],
List
[
List
[
Tuple
[
float
,
bool
]]]]:
)
->
Union
[
List
[
List
[
str
]],
List
[
List
[
Tuple
[
float
,
bool
]]]]:
ctxlens
=
ctxlens
if
ctxlens
else
[
None
]
*
len
(
requests
)
ctxlens
=
ctxlens
if
ctxlens
else
[
None
]
*
len
(
requests
)
conn
=
TCPConnector
(
limit
=
self
.
_concurrent
)
conn
=
TCPConnector
(
limit
=
self
.
_concurrent
,
ssl
=
self
.
verify_certificate
)
async
with
ClientSession
(
async
with
ClientSession
(
connector
=
conn
,
timeout
=
ClientTimeout
(
total
=
self
.
timeout
)
connector
=
conn
,
timeout
=
ClientTimeout
(
total
=
self
.
timeout
)
)
as
session
:
)
as
session
:
...
...
tests/models/test_api.py
View file @
3b7dbef9
from
unittest.mock
import
MagicMock
,
patch
import
asyncio
from
unittest.mock
import
AsyncMock
,
MagicMock
,
patch
import
pytest
import
pytest
...
@@ -21,6 +22,17 @@ def api_tokenized():
...
@@ -21,6 +22,17 @@ def api_tokenized():
)
)
@pytest.fixture
def api_batch_ssl_tokenized():
    """LocalCompletionsAPI instance with SSL verification disabled.

    Configured for two concurrent batched requests so tests can observe
    how `verify_certificate=False` is forwarded into the connector layer.
    """
    config = {
        "base_url": "https://test-url.com",
        "model": "EleutherAI/pythia-1b",
        "verify_certificate": False,
        "num_concurrent": 2,
        "tokenizer_backend": "huggingface",
    }
    return LocalCompletionsAPI(**config)
def
test_create_payload_generate
(
api
):
def
test_create_payload_generate
(
api
):
messages
=
[
"Generate a story"
]
messages
=
[
"Generate a story"
]
gen_kwargs
=
{
gen_kwargs
=
{
...
@@ -147,3 +159,70 @@ def test_model_tokenized_call_usage(
...
@@ -147,3 +159,70 @@ def test_model_tokenized_call_usage(
assert
"json"
in
kwargs
assert
"json"
in
kwargs
assert
kwargs
[
"json"
]
==
expected_payload
assert
kwargs
[
"json"
]
==
expected_payload
assert
result
==
{
"result"
:
"success"
}
assert
result
==
{
"result"
:
"success"
}
class DummyAsyncContextManager:
    """Minimal async context manager wrapping a preset object.

    Stands in for the awaitable context returned by `session.post(...)`:
    entering the context yields the wrapped object, and exit is a no-op
    that does not suppress exceptions.
    """

    def __init__(self, result):
        # Object produced by `async with ... as x`.
        self.result = result

    async def __aenter__(self):
        return self.result

    async def __aexit__(self, exc_type, exc, tb):
        # Falsy return value lets any in-flight exception propagate.
        return None
@pytest.mark.parametrize(
    "expected_inputs, expected_ctxlens, expected_cache_keys",
    [
        (
            [
                [1, 2, 3, 4, 5],
                [6, 7, 8, 9, 10],
                [11, 12, 13, 14, 15],
                [16, 17, 18, 19, 20],
            ],
            [3, 3, 3, 3],
            ["cache_key1", "cache_key2", "cache_key3", "cache_key4"],
        ),
    ],
)
def test_get_batched_requests_with_no_ssl(
    api_batch_ssl_tokenized, expected_inputs, expected_ctxlens, expected_cache_keys
):
    """Batched requests must forward `verify_certificate=False` as `ssl=False` to TCPConnector."""
    with (
        patch(
            "lm_eval.models.api_models.TCPConnector", autospec=True
        ) as connector_cls,
        patch(
            "lm_eval.models.api_models.ClientSession", autospec=True
        ) as session_cls,
        patch(
            "lm_eval.models.openai_completions.LocalCompletionsAPI.parse_logprobs",
            autospec=True,
        ) as parse_logprobs,
    ):
        # Canned successful HTTP response with a fixed JSON body.
        fake_response = AsyncMock()
        fake_response.status = 200
        fake_response.ok = True
        fake_response.json = AsyncMock(return_value={"mocked": "response"})
        fake_response.raise_for_status = lambda: None

        # `session.post(...)` must act as an async context manager that
        # yields the fake response on entry.
        fake_session = AsyncMock()
        fake_session.post = lambda *a, **kw: DummyAsyncContextManager(fake_response)
        session_cls.return_value.__aenter__.return_value = fake_session

        parse_logprobs.return_value = [(1.23, True), (4.56, False)]

        async def invoke():
            return await api_batch_ssl_tokenized.get_batched_requests(
                expected_inputs,
                expected_cache_keys,
                generate=False,
                ctxlens=expected_ctxlens,
            )

        result_batches = asyncio.run(invoke())

        # Fixture sets num_concurrent=2 and verify_certificate=False.
        connector_cls.assert_called_with(limit=2, ssl=False)
        assert result_batches
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment