Unverified Commit 6a5cde6a authored by Jess, committed by GitHub

Merge pull request #23 from JessicaOjo/africamgsm

Manual XNLI tasks; bypass multiple-choice logits for OpenAI
parents fb142ccd 9701ef6e
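
The PR title mentions bypassing multiple-choice logits for OpenAI models; the diff below shows only the task configs and helpers, not that change itself. Purely as an illustration (not this PR's implementation), one common workaround when per-choice log-likelihoods are unavailable is to generate a short completion and map it back onto the fixed label set, for example:

# Illustration only, not taken from this PR: map a free-form completion onto
# the fixed label set when per-choice logits are unavailable (e.g. chat APIs).
import string

def match_generation_to_choice(generation, choices=("entailment", "neutral", "contradiction")):
    # Normalise case and punctuation, mirroring the config's ignore_case /
    # ignore_punctuation options, then return the index of the first label found.
    cleaned = generation.lower().translate(str.maketrans("", "", string.punctuation))
    for idx, choice in enumerate(choices):
        if choice in cleaned:
            return idx
    return -1  # no label found in the completion
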
# Generated by utils.py
dataset_name: twi
include: afrixnli_manual_direct_yaml
task: afrixnli_manual_direct_twi
# Generated by utils.py
dataset_name: wol
include: afrixnli_manual_direct_yaml
task: afrixnli_manual_direct_wol
# Generated by utils.py
dataset_name: xho
include: afrixnli_manual_direct_yaml
task: afrixnli_manual_direct_xho
group:
  - xnli
  - afrixnli
  - afrixnli-manual
dataset_path: masakhane/afrixnli
dataset_name: null
output_type: multiple_choice_gpt
validation_split: validation
test_split: test
fewshot_split: validation
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
doc_to_choice:
  - "entailment"
  - "neutral"
  - "contradiction"
should_decontaminate: true
doc_to_decontamination_query: premise
metric_list:
  - metric: f1
    aggregation: !function utils.weighted_f1_score
    average: weighted
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
  - metric: acc
    aggregation: acc_gpt
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
metadata:
  version: 1.0
# Generated by utils.py
dataset_name: yor
include: afrixnli_manual_direct_yaml
task: afrixnli_manual_direct_yor
# Generated by utils.py
dataset_name: zul
include: afrixnli_manual_direct_yaml
task: afrixnli_manual_direct_zul
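
Each per-language stub above carries a "# Generated by utils.py" header, which suggests the stubs are written by a small generator rather than by hand. A minimal sketch of such a generator follows; the language list, file names, and the direct/translate modes are inferred from the files in this diff, not copied from the repository's actual script.

# Hedged sketch of a stub generator; paths and the language list are assumptions
# based on the per-language files shown in this diff.
import yaml

LANGUAGES = ["amh", "ewe", "fra", "hau", "ibo", "kin", "lin", "lug",
             "orm", "sna", "sot", "swa", "twi", "wol", "xho", "yor", "zul"]

def generate_stubs(mode="direct"):
    # Write one small YAML per language that includes the shared base config.
    for lang in LANGUAGES:
        stub = {
            "dataset_name": lang,
            "include": f"afrixnli_manual_{mode}_yaml",
            "task": f"afrixnli_manual_{mode}_{lang}",
        }
        with open(f"afrixnli_manual_{mode}_{lang}.yaml", "w") as f:
            f.write("# Generated by utils.py\n")
            yaml.dump(stub, f, sort_keys=False)

if __name__ == "__main__":
    generate_stubs("direct")
    generate_stubs("translate")
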
from sklearn.metrics import f1_score


def doc_to_text(doc):
    """Format a premise/hypothesis pair into the NLI prompt."""
    output = """Please identify whether the premise entails or contradicts the hypothesis in the following premise
and hypothesis. The answer should be exact entailment, contradiction, or neutral.
Premise: {premise}
Hypothesis: {hypothesis}
Is it entailment, contradiction, or neutral?"""
    text = output.format(premise=doc["premise"],
                         hypothesis=doc["hypothesis"])
    return text


def doc_to_target(doc):
    """Map the dataset's integer label to its string form."""
    replacements = {
        0: "entailment",
        1: "neutral",
        2: "contradiction",
    }
    return replacements[doc["label"]]


def weighted_f1_score(items):
    """Aggregate (gold, prediction) pairs into a weighted F1 score."""
    golds, preds = zip(*items)
    return f1_score(golds, preds, average="weighted")
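
For reference, this is how the helpers behave on a single record; the sample below is hypothetical but follows the premise/hypothesis/label schema the functions expect.

# Hypothetical record following the schema used by doc_to_text / doc_to_target.
sample = {
    "premise": "The children are playing football in the yard.",
    "hypothesis": "The children are outside.",
    "label": 0,  # 0 = entailment, 1 = neutral, 2 = contradiction
}

print(doc_to_text(sample))    # the formatted NLI prompt sent to the model
print(doc_to_target(sample))  # "entailment"

# The aggregation receives (gold, prediction) string pairs collected over the split.
print(weighted_f1_score([("entailment", "entailment"),
                         ("neutral", "contradiction")]))
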
# Generated by utils.py
dataset_name: amh
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_amh
# Generated by utils.py
dataset_name: ewe
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_ewe
# Generated by utils.py
dataset_name: fra
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_fra
# Generated by utils.py
dataset_name: hau
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_hau
# Generated by utils.py
dataset_name: ibo
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_ibo
# Generated by utils.py
dataset_name: kin
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_kin
# Generated by utils.py
dataset_name: lin
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_lin
# Generated by utils.py
dataset_name: lug
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_lug
# Generated by utils.py
dataset_name: orm
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_orm
# Generated by utils.py
dataset_name: sna
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_sna
# Generated by utils.py
dataset_name: sot
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_sot
# Generated by utils.py
dataset_name: swa
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_swa
# Generated by utils.py
dataset_name: twi
include: afrixnli_manual_translate_yaml
task: afrixnli_manual_translate_twi
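
Once the per-language YAMLs are registered, a task can be run through the harness. The sketch below uses the evaluator's Python entry point; the exact import path, model argument, and how the custom multiple_choice_gpt output type plugs into an OpenAI-backed model may differ in this fork, so treat it as an assumption rather than documented usage.

# Hedged sketch: running one generated task via lm-evaluation-harness.
# The model choice is a placeholder; this fork's multiple_choice_gpt output
# type is presumably meant for OpenAI-style completion models.
from lm_eval.evaluator import simple_evaluate

results = simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",
    tasks=["afrixnli_manual_direct_yor"],
    num_fewshot=0,
)
print(results["results"]["afrixnli_manual_direct_yor"])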