gaoqiong / lm-evaluation-harness

Commit 4eecbabb, authored Sep 16, 2024 by Baber
Merge branch 'main' into prefill
Parents: dac8b534, fb963f0f
Changes: 465 files changed in this commit; this page shows 20 changed files with 411 additions and 0 deletions.
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_copa/utils.py (+19, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_hellaswag/arabic_leaderboard_arabic_mt_hellaswag.yaml (+13, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_hellaswag/arabic_mt_hellaswag.yaml (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_hellaswag/utils.py (+30, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_mmlu/arabic_leaderboard_arabic_mt_mmlu.yaml (+13, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_mmlu/arabic_mt_mmlu.yaml (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_mmlu/utils.py (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_openbook_qa/arabic_leaderboard_arabic_mt_openbook_qa.yaml (+13, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_openbook_qa/arabic_mt_openbook_qa.yaml (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_openbook_qa/utils.py (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_piqa/arabic_leaderboard_arabic_mt_piqa.yaml (+13, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_piqa/arabic_mt_piqa.yaml (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_piqa/utils.py (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_race/arabic_leaderboard_arabic_mt_race.yaml (+13, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_race/arabic_mt_race.yaml (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_race/utils.py (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_sciq/arabic_leaderboard_arabic_mt_sciq.yaml (+13, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_sciq/arabic_mt_sciq.yaml (+23, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_sciq/utils.py (+41, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_toxigen/arabic_leaderboard_arabic_mt_toxigen.yaml (+13, -0)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_copa/utils.py (new file, mode 100644)

import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        premise = doc["premise"]
        choices = [doc["choice1"], doc["choice2"]]
        # Map the COPA question type to its Arabic connective: "cause" -> "لأن" ("because"), "effect" -> "لذلك" ("therefore")
        question_map = {"cause": "لأن", "effect": "لذلك"}
        question = question_map[doc["question"]]
        answer = doc["label"]
        # "الإجابة:" = "Answer:"
        query = "{}، {} :\n0) {}\n1) {}\nالإجابة:".format(
            premise, question, choices[0], choices[1]
        )
        return {"query": query, "choices": choices, "gold": answer}

    return dataset.map(_process_doc)
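For orientation, a minimal sketch of how this transform behaves on a single record. The sample values and the import path are hypothetical; only the field names and process_docs itself come from the file above:

import datasets

from utils import process_docs  # hypothetical import path for the utils.py above

# Hypothetical COPA-style record; only the field names are taken from the code above.
sample = {
    "premise": "premise text",
    "choice1": "first choice",
    "choice2": "second choice",
    "question": "effect",
    "label": 0,
}
ds = datasets.Dataset.from_dict({k: [v] for k, v in sample.items()})
out = process_docs(ds)[0]
print(out["query"])  # premise, "لذلك" ("therefore"), the two numbered choices, then "الإجابة:" ("Answer:")
print(out["gold"])   # 0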
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_hellaswag/arabic_leaderboard_arabic_mt_hellaswag.yaml (new file, mode 100644)

group: arabic_leaderboard_arabic_mt_hellaswag
task:
  - arabic_mt_hellaswag
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_hellaswag/arabic_mt_hellaswag.yaml (new file, mode 100644)

task: arabic_mt_hellaswag
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: hellaswag_okapi_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
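With this pair of files on the harness's task path, the task should be selectable by name. A minimal usage sketch in Python, treating the model name, model arguments, and few-shot setting as placeholder assumptions (only the task name comes from the YAML above):

import lm_eval

# Placeholder model settings; only the task name is taken from the YAML above.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=some-org/some-arabic-model",
    tasks=["arabic_mt_hellaswag"],
    num_fewshot=0,
)
print(results["results"]["arabic_mt_hellaswag"])  # acc and acc_norm, per the metric_list above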
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_hellaswag/utils.py (new file, mode 100644)

import re

import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        ctx = re.sub(r"\[.*?\]", "", doc["ctx"])  # Remove latin words within brackets
        endings = [
            re.sub(r"\[.*?\]", "", e) for e in eval(doc["endings"])
        ]  # endings is a string representation of a list
        answer_index = doc["label"]
        # Instruction: "Based on the following context, choose the correct ending from the following suggestions"
        instruction = "بناء على السياق التالي، اختر النهاية الصحيحة من الاقتراحات التالية"
        # "السياق:" = "Context:", "الاقتراحات:" = "Suggestions:"
        query = f"""{instruction}
السياق:
{ctx}
الاقتراحات:
"""
        for i, ending in enumerate(endings):
            query += f"{i}) {ending}\n"
        query += "الإجابة:"  # "Answer:"
        return {"query": query, "choices": endings, "gold": answer_index}

    return dataset.map(_process_doc)
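A small, self-contained illustration of the preprocessing above (the sample strings are hypothetical; the regex and the string-encoded endings format mirror the code):

import re

raw_ctx = "some context [latin gloss] more context"
print(re.sub(r"\[.*?\]", "", raw_ctx))  # bracketed insertions are stripped

# The dataset stores "endings" as a string-encoded Python list, hence the eval() above.
raw_endings = '["ending one [gloss]", "ending two"]'
print([re.sub(r"\[.*?\]", "", e) for e in eval(raw_endings)])  # ['ending one ', 'ending two']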
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_mmlu/arabic_leaderboard_arabic_mt_mmlu.yaml (new file, mode 100644)

group: arabic_leaderboard_arabic_mt_mmlu
task:
  - arabic_mt_mmlu
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_mmlu/arabic_mt_mmlu.yaml (new file, mode 100644)

task: arabic_mt_mmlu
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: mmlu_okapi_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_mmlu/utils.py (new file, mode 100644)

import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        question = doc["query"]
        answer_index = int(doc["label"])
        # Dynamically determining the choices by excluding '__few_shots', 'query' and 'label'
        choices_keys = [
            key for key in doc.keys() if key not in ["query", "label", "__few_shots"]
        ]
        choices = [doc[key] for key in choices_keys]

        # Instruction: "The following questions are multiple-choice questions with the correct answer"
        instruction = "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"
        query = f"{instruction}السؤال: {question}\n"  # "السؤال:" = "Question:"
        for index, choice in enumerate(choices):
            query += f"{index}) {choice}\n"
        query += "الإجابة:"  # "Answer:"
        return {"query": query, "choices": choices, "gold": answer_index}

    return dataset.map(_process_doc)
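The same utils.py is repeated verbatim for the openbook_qa, piqa, and race tasks below. The key-filtering step is easiest to see on a toy record; a minimal sketch, where the record and its key names ("sol1", "sol2") are hypothetical:

# Hypothetical record; everything except "query", "label", and "__few_shots" is treated as a choice.
doc = {"query": "What is 2 + 2?", "label": "1", "sol1": "3", "sol2": "4", "__few_shots": False}
choices_keys = [key for key in doc.keys() if key not in ["query", "label", "__few_shots"]]
print(choices_keys)                        # ['sol1', 'sol2']
print([doc[key] for key in choices_keys])  # ['3', '4']
print(int(doc["label"]))                   # 1 -> index of the gold choice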
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_openbook_qa/arabic_leaderboard_arabic_mt_openbook_qa.yaml (new file, mode 100644)

group: arabic_leaderboard_arabic_mt_openbook_qa
task:
  - arabic_mt_openbook_qa
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_openbook_qa/arabic_mt_openbook_qa.yaml (new file, mode 100644)

task: arabic_mt_openbook_qa
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: openbook_qa_ext_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_openbook_qa/utils.py (new file, mode 100644)

import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        question = doc["query"]
        answer_index = int(doc["label"])
        # Dynamically determining the choices by excluding '__few_shots', 'query' and 'label'
        choices_keys = [
            key for key in doc.keys() if key not in ["query", "label", "__few_shots"]
        ]
        choices = [doc[key] for key in choices_keys]

        # Instruction: "The following questions are multiple-choice questions with the correct answer"
        instruction = "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"
        query = f"{instruction}السؤال: {question}\n"  # "السؤال:" = "Question:"
        for index, choice in enumerate(choices):
            query += f"{index}) {choice}\n"
        query += "الإجابة:"  # "Answer:"
        return {"query": query, "choices": choices, "gold": answer_index}

    return dataset.map(_process_doc)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_piqa/arabic_leaderboard_arabic_mt_piqa.yaml (new file, mode 100644)

group: arabic_leaderboard_arabic_mt_piqa
task:
  - arabic_mt_piqa
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_piqa/arabic_mt_piqa.yaml (new file, mode 100644)

task: arabic_mt_piqa
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: piqa_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_piqa/utils.py (new file, mode 100644)

import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        question = doc["query"]
        answer_index = int(doc["label"])
        # Dynamically determining the choices by excluding '__few_shots', 'query' and 'label'
        choices_keys = [
            key for key in doc.keys() if key not in ["query", "label", "__few_shots"]
        ]
        choices = [doc[key] for key in choices_keys]

        # Instruction: "The following questions are multiple-choice questions with the correct answer"
        instruction = "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"
        query = f"{instruction}السؤال: {question}\n"  # "السؤال:" = "Question:"
        for index, choice in enumerate(choices):
            query += f"{index}) {choice}\n"
        query += "الإجابة:"  # "Answer:"
        return {"query": query, "choices": choices, "gold": answer_index}

    return dataset.map(_process_doc)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_race/arabic_leaderboard_arabic_mt_race.yaml (new file, mode 100644)

group: arabic_leaderboard_arabic_mt_race
task:
  - arabic_mt_race
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_race/arabic_mt_race.yaml (new file, mode 100644)

task: arabic_mt_race
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: race_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_race/utils.py (new file, mode 100644)

import datasets
import numpy as np


def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        question = doc["query"]
        answer_index = int(doc["label"])
        # Dynamically determining the choices by excluding '__few_shots', 'query' and 'label'
        choices_keys = [
            key for key in doc.keys() if key not in ["query", "label", "__few_shots"]
        ]
        choices = [doc[key] for key in choices_keys]

        # Instruction: "The following questions are multiple-choice questions with the correct answer"
        instruction = "الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح\n\n"
        query = f"{instruction}السؤال: {question}\n"  # "السؤال:" = "Question:"
        for index, choice in enumerate(choices):
            query += f"{index}) {choice}\n"
        query += "الإجابة:"  # "Answer:"
        return {"query": query, "choices": choices, "gold": answer_index}

    return dataset.map(_process_doc)
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_sciq/arabic_leaderboard_arabic_mt_sciq.yaml (new file, mode 100644)

group: arabic_leaderboard_arabic_mt_sciq
task:
  - arabic_mt_sciq
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_sciq/arabic_mt_sciq.yaml (new file, mode 100644)

task: arabic_mt_sciq
dataset_path: OALL/AlGhafa-Arabic-LLM-Benchmark-Translated
dataset_name: sciq_ar
output_type: multiple_choice
training_split: null
validation_split: validation
test_split: test
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "choices"
fewshot_split: validation
fewshot_config:
  sampler: first_n
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_sciq/utils.py (new file, mode 100644)

import random

import datasets
import numpy as np


def doc_to_text(doc):
    # Instruction: "Based on the context below, choose the correct answer to the following question from the list of suggestions"
    instruction = "بناءً على السياق أدناه، اختر الإجابة الصحيحة للسؤال التالي من قائمة الاقتراحات"
    support = doc["support"]
    question = doc["question"]
    # "السياق:" = "Context:", "السؤال:" = "Question:", "الإجابات المحتملة:" = "Possible answers:"
    query = f"""{instruction}
السياق:
{support}
السؤال:
{question}
الإجابات المحتملة:
"""
    return query


def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        correct_answer = doc["correct_answer"]
        choices = [
            doc["distractor1"],
            doc["distractor2"],
            doc["distractor3"],
            correct_answer,
        ]
        # Shuffle the choices
        random.shuffle(choices)
        answer_index = choices.index(correct_answer)
        return {"query": doc_to_text(doc), "choices": choices, "gold": answer_index}

    return dataset.map(_process_doc)
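A short illustration of the shuffle-then-index pattern above (the answer strings are hypothetical):

import random

correct_answer = "oxygen"
choices = ["nitrogen", "helium", "carbon", correct_answer]
random.shuffle(choices)               # choice order is randomized per document
gold = choices.index(correct_answer)  # the gold index follows wherever the correct answer landed
print(choices, gold)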
lm_eval/tasks/arabic_leaderboard_complete/arabic_leaderboard_arabic_mt_toxigen/arabic_leaderboard_arabic_mt_toxigen.yaml (new file, mode 100644)

group: arabic_leaderboard_arabic_mt_toxigen
task:
  - arabic_mt_toxigen
aggregate_metric_list:
  - metric: acc
    aggregation: mean
    weight_by_size: true
  - metric: acc_norm
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0