lm-evaluation-harness · Commit dc5b3d5d
Authored Nov 28, 2023 by Stella Biderman; committed via GitHub, Nov 28, 2023

Merge pull request #1031 from EleutherAI/versioning

[Refactor] Versioning

Parents: 39c2bb4e, 52f75f0e
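This merge rolls per-task versioning out across the task library: each task YAML gains a trailing metadata block recording a version number for the config, so saved results can state which revision of a prompt and scoring setup produced them. As a minimal sketch of the resulting shape (field values are illustrative, borrowed from the ethics configs below; only the final two lines are what this merge adds to each file):

# Sketch of a task config after this change; everything above
# "metadata:" is illustrative context, not part of this diff.
doc_to_target: label
doc_to_choice: ['no', 'yes']
metric_list:
  - metric: acc
metadata:
  - version: 1.0   # per-task config version introduced by this refactor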
Changes: 128 files in total; this page shows 20 changed files with 40 additions and 1 deletion (+40, -1).
lm_eval/tasks/hendrycks_ethics/commonsense.yaml              +2 -0
lm_eval/tasks/hendrycks_ethics/deontology.yaml               +2 -1
lm_eval/tasks/hendrycks_ethics/justice.yaml                  +2 -0
lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml           +2 -0
lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml  +2 -0
lm_eval/tasks/hendrycks_ethics/virtue.yaml                   +2 -0
lm_eval/tasks/lambada/lambada_openai.yaml                    +2 -0
lm_eval/tasks/lambada/lambada_standard.yaml                  +2 -0
lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml        +2 -0
lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml      +2 -0
lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml        +2 -0
lm_eval/tasks/logiqa/logiqa.yaml                             +2 -0
lm_eval/tasks/logiqa2/logieval.yaml                          +2 -0
lm_eval/tasks/logiqa2/logiqa2.yaml                           +2 -0
lm_eval/tasks/mathqa/mathqa.yaml                             +2 -0
lm_eval/tasks/mc_taco/default.yaml                           +2 -0
lm_eval/tasks/mgsm/direct/direct_yaml                        +2 -0
lm_eval/tasks/mgsm/en_cot/cot_yaml                           +2 -0
lm_eval/tasks/mgsm/native_cot/cot_yaml                       +2 -0
lm_eval/tasks/minerva_math/minerva_math_algebra.yaml         +2 -0
lm_eval/tasks/hendrycks_ethics/commonsense.yaml
@@ -11,3 +11,5 @@ doc_to_target: label
doc_to_choice: ['no', 'yes']
metric_list:
  - metric: acc
metadata:
  - version: 1.0
lm_eval/tasks/hendrycks_ethics/deontology.yaml
@@ -4,5 +4,6 @@ dataset_name: deontology
doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}} {{excuse.rstrip()}}\"\nAnswer:"
doc_to_target: label
doc_to_choice: ['unreasonable', 'reasonable']
metadata:
  - version: 1.0
# TODO: implement exact-match metric for this subset
lm_eval/tasks/hendrycks_ethics/justice.yaml
@@ -5,3 +5,5 @@ task: ethics_justice
dataset_name: justice
doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}}\"\nAnswer:"
# TODO: impl. exact match for this and deontology
metadata:
  - version: 1.0
lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml
@@ -8,3 +8,5 @@ doc_to_target: !function utils.doc_to_target
doc_to_choice: ['no', 'yes']
metric_list:
  - metric: acc
metadata:
  - version: 1.0
lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml
@@ -12,3 +12,5 @@
# metric_list:
# - metric: acc
# TODO: we want this to be implemented as a winograd_schema task type, actually
# metadata:
# - version: 1.0
lm_eval/tasks/hendrycks_ethics/virtue.yaml
@@ -6,3 +6,5 @@ dataset_name: virtue
doc_to_text: "Sentence: {{scenario}}\nQuestion: Does the character in this sentence exhibit the trait \"{{trait}}\"?\nAnswer:"
doc_to_target: label
doc_to_choice: ['no', 'yes']
metadata:
  - version: 1.0
lm_eval/tasks/lambada/lambada_openai.yaml
@@ -16,3 +16,5 @@ metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm_eval/tasks/lambada/lambada_standard.yaml
@@ -17,3 +17,5 @@ metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml
@@ -16,3 +16,5 @@ metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml
@@ -17,3 +17,5 @@ metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml
@@ -16,3 +16,5 @@ metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm_eval/tasks/logiqa/logiqa.yaml
@@ -17,3 +17,5 @@ metric_list:
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm_eval/tasks/logiqa2/logieval.yaml
@@ -23,3 +23,5 @@ filter_list:
        # https://github.com/openai/evals/blob/305b237cdb3884c7ddb6a5d12cb184a83551fcba/evals/api.py#L84
        regex_pattern: "^\\s*([A-D])"
      - function: "take_first"
metadata:
  - version: 0.0
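The hunk above shows only a fragment of the filter pipeline. For readability, here is a sketch of the full filter_list shape this fragment belongs to, with the enclosing name key and exact nesting assumed rather than taken from this diff: a regex step skips leading whitespace and captures the first answer letter A-D, and take_first keeps only the first extracted candidate.

# Sketch of the enclosing pipeline; "get-answer" is a hypothetical
# name not shown in this hunk.
filter_list:
  - name: "get-answer"
    filter:
      - function: "regex"
        # capture the first A-D letter after optional leading whitespace
        regex_pattern: "^\\s*([A-D])"
      - function: "take_first"   # keep only the first captured match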
lm_eval/tasks/logiqa2/logiqa2.yaml
@@ -17,3 +17,5 @@ metric_list:
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 0.0
lm_eval/tasks/mathqa/mathqa.yaml
@@ -18,3 +18,5 @@ metric_list:
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm_eval/tasks/mc_taco/default.yaml
@@ -11,3 +11,5 @@ doc_to_decontamination_query: "{{question}} {{sentence}}"
metric_list:
  - metric: acc
  - metric: f1
metadata:
  - version: 1.0
lm_eval/tasks/mgsm/direct/direct_yaml
@@ -25,3 +25,5 @@ metric_list:
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
metadata:
  - version: 0.0
lm_eval/tasks/mgsm/en_cot/cot_yaml
@@ -27,3 +27,5 @@ filter_list:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)"
      - function: "take_first"
metadata:
  - version: 0.0
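The chain-of-thought variants use the same pipeline shape as logieval above, but capture the final number from a generated rationale: the group (\\-?[0-9\\.\\,]+) matches an optional minus sign followed by a run of digits, commas, and periods, so a completion ending in "The answer is 1,234" yields "1,234". A sketch with the enclosing keys assumed, as before:

# Sketch of the enclosing pipeline; "get-answer" is a hypothetical name.
filter_list:
  - name: "get-answer"
    filter:
      - function: "regex"
        # capture the final numeric answer, e.g. "1,234" or "-3.5"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)"
      - function: "take_first"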
lm_eval/tasks/mgsm/native_cot/cot_yaml
@@ -27,3 +27,5 @@ filter_list:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)"
      - function: "take_first"
metadata:
  - version: 1.0
lm_eval/tasks/minerva_math/minerva_math_algebra.yaml
@@ -19,3 +19,5 @@ metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 0.0