gaoqiong / lm-evaluation-harness · Commit 894370df

make typing 3.8 compatible

Authored Sep 15, 2023 by baberabb
Parent: fa8f2381

Showing 3 changed files, with 6 additions and 4 deletions:

.github/workflows/new_tasks.yml  +2 -2
tests/test_evaluator.py          +2 -1
tests/utils.py                   +2 -1
.github/workflows/new_tasks.yml

```diff
@@ -63,10 +63,10 @@ jobs:
       - name: Test with pytest
         # if new tasks are added, run tests on them
         if: steps.changed-tasks.outputs.tasks_any_modified == 'true'
-        run: python -m pytest tests/test_tasks.py -s -vv -n=auto
+        run: python -m pytest tests/test_tasks.py -s -vv
         # if api is modified, run tests on it
       - name: Test more tasks with pytest
         env:
           API: true
         if: steps.changed-tasks.outputs.api_any_modified == 'true'
-        run: python -m pytest tests/test_tasks.py -s -vv -n=auto
+        run: python -m pytest tests/test_tasks.py -s -vv
```
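Side note on the dropped flag: `-n=auto` comes from the pytest-xdist plugin and distributes tests across all available CPU cores, so removing it makes both CI steps run in a single process. A minimal sketch of the equivalent local invocation through pytest's public API (assuming pytest is installed, plus pytest-xdist if you re-enable the parallel variant):

```python
import sys

import pytest

# Single-process run, matching the CI step after this commit.
args = ["tests/test_tasks.py", "-s", "-vv"]

# Pre-commit behavior: pytest-xdist's -n=auto fans tests out across CPU cores.
# args.append("-n=auto")

sys.exit(pytest.main(args))
```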
tests/test_evaluator.py

```diff
@@ -7,6 +7,7 @@ import lm_eval.tasks as tasks
 # import lm_eval.models as models
 import lm_eval.api as api
 import lm_eval.evaluator as evaluator
+from typing import List
 import random
 import pytest
```

```diff
@@ -26,7 +27,7 @@ import pytest
         )
     ],
 )
-def test_evaluator(task_name: list[str], limit: int, model: str, model_args: str):
+def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str):
     task_name = task_name
     limit = 10
```
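This one-character change is what the commit title refers to: subscripting built-in types such as `list[str]` (PEP 585) requires Python 3.9+, and because annotations are evaluated eagerly by default, the `def` statement itself raises on Python 3.8. `typing.List[str]` is the 3.8-compatible spelling. A minimal repro, independent of the harness:

```python
from typing import List

# On Python 3.8 the following def raises at import time, because the
# annotation list[str] is evaluated when the def executes and built-in
# types are not subscriptable before 3.9 (PEP 585):
#
#     def broken(task_name: list[str]) -> None: ...
#     # TypeError: 'type' object is not subscriptable

# Portable 3.8+ spelling via the typing module:
def works(task_name: List[str]) -> None:
    print(task_name)

works(["example_task"])  # illustrative task name, not from the repo
```

An alternative fix would have been `from __future__ import annotations` (PEP 563, available since Python 3.7), which defers annotation evaluation; this commit instead opts for explicit `typing` imports.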
tests/utils.py

```diff
@@ -9,6 +9,7 @@ import os
 # This is the path where the output for the changed files for the tasks folder is stored
 # FILE_PATH = file_path = ".github/outputs/tasks_all_changed_and_modified_files.txt"

+
 # reads a text file and returns a list of words
 # used to read the output of the changed txt from tj-actions/changed-files
 def load_changed_files(file_path: str) -> List[str]:
```

```diff
@@ -32,7 +33,7 @@ def parser(full_path: List[str]) -> List[str]:
     return list(_output)


-def new_tasks() -> Union[list[str], None]:
+def new_tasks() -> Union[List[str], None]:
     FILENAME = ".github/outputs/tasks_all_changed_and_modified_files.txt"
     if os.path.exists(FILENAME):
         # If tasks folder has changed then we get the list of files from FILENAME
```
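For orientation, the two helpers touched here plug into the `tj-actions/changed-files` step from the workflow above: that action writes a whitespace-separated list of changed paths to the `.github/outputs/` file, `load_changed_files` reads it back as a list, and `new_tasks` returns that list or `None`. A rough sketch of that contract, following the behavior the diff's comments describe rather than the repository's exact code:

```python
import os
from typing import List, Union

# Hypothetical sketch matching the comments in the diff, not the repo's code.


def load_changed_files(file_path: str) -> List[str]:
    # The changed-files action emits a whitespace-separated line of paths;
    # split it into individual path strings.
    with open(file_path) as f:
        return f.read().split()


def new_tasks() -> Union[List[str], None]:
    # If the tasks folder changed, the workflow wrote this file: return its
    # contents; otherwise signal "no new tasks" with None.
    FILENAME = ".github/outputs/tasks_all_changed_and_modified_files.txt"
    if os.path.exists(FILENAME):
        return load_changed_files(FILENAME)
    return None
```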