Unverified commit a6c640d3 authored by Lintang Sutawika, committed by GitHub

Merge branch 'big-refactor' into seq2seq-refactor

parents 55eccc29 24e3e3fa
{
"results": {
"pawsx_de": {
"acc": 0.491,
"acc_stderr": 0.01118132420626029
},
"pawsx_ko": {
"acc": 0.4625,
"acc_stderr": 0.011151639095992287
},
"pawsx_en": {
"acc": 0.5065,
"acc_stderr": 0.0111821910061423
},
"pawsx_es": {
"acc": 0.5255,
"acc_stderr": 0.011168582883330072
},
"pawsx_fr": {
"acc": 0.508,
"acc_stderr": 0.011181704488030002
},
"pawsx_ja": {
"acc": 0.441,
"acc_stderr": 0.011105006104468738
},
"pawsx_zh": {
"acc": 0.478,
"acc_stderr": 0.011172305500884872
}
},
"versions": {
"pawsx_de": 0,
"pawsx_ko": 0,
"pawsx_en": 0,
"pawsx_es": 0,
"pawsx_fr": 0,
"pawsx_ja": 0,
"pawsx_zh": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-564M,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xcopa_id": {
"acc": 0.572,
"acc_stderr": 0.02214979066386193
},
"xcopa_th": {
"acc": 0.552,
"acc_stderr": 0.02226169729227013
},
"xcopa_it": {
"acc": 0.538,
"acc_stderr": 0.022318338119870534
},
"xcopa_ht": {
"acc": 0.55,
"acc_stderr": 0.022270877485360437
},
"xcopa_tr": {
"acc": 0.544,
"acc_stderr": 0.02229623834840705
},
"xcopa_zh": {
"acc": 0.556,
"acc_stderr": 0.02224224437573102
},
"xcopa_vi": {
"acc": 0.584,
"acc_stderr": 0.02206494331392886
},
"xcopa_sw": {
"acc": 0.532,
"acc_stderr": 0.022337186479044296
},
"xcopa_ta": {
"acc": 0.562,
"acc_stderr": 0.022210326363977413
},
"xcopa_qu": {
"acc": 0.492,
"acc_stderr": 0.022380208834928028
},
"xcopa_et": {
"acc": 0.556,
"acc_stderr": 0.02224224437573102
}
},
"versions": {
"xcopa_id": 0,
"xcopa_th": 0,
"xcopa_it": 0,
"xcopa_ht": 0,
"xcopa_tr": 0,
"xcopa_zh": 0,
"xcopa_vi": 0,
"xcopa_sw": 0,
"xcopa_ta": 0,
"xcopa_qu": 0,
"xcopa_et": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-564M,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xnli_de": {
"acc": 0.44491017964071855,
"acc_stderr": 0.007021700267328247
},
"xnli_sw": {
"acc": 0.36067864271457084,
"acc_stderr": 0.006784913322438225
},
"xnli_ar": {
"acc": 0.3341317365269461,
"acc_stderr": 0.006664652441694282
},
"xnli_ru": {
"acc": 0.44630738522954094,
"acc_stderr": 0.007023860641475025
},
"xnli_fr": {
"acc": 0.4548902195608782,
"acc_stderr": 0.007035901825327945
},
"xnli_zh": {
"acc": 0.33512974051896205,
"acc_stderr": 0.0066695943825036295
},
"xnli_th": {
"acc": 0.38782435129740517,
"acc_stderr": 0.006884621148080227
},
"xnli_el": {
"acc": 0.39560878243512976,
"acc_stderr": 0.0069090210962589015
},
"xnli_ur": {
"acc": 0.3447105788423154,
"acc_stderr": 0.006715345603576113
},
"xnli_hi": {
"acc": 0.38682634730538923,
"acc_stderr": 0.00688135955102393
},
"xnli_es": {
"acc": 0.42035928143712575,
"acc_stderr": 0.006974518775334314
},
"xnli_tr": {
"acc": 0.40199600798403196,
"acc_stderr": 0.006927673254973623
},
"xnli_bg": {
"acc": 0.41297405189620756,
"acc_stderr": 0.006956880058406987
},
"xnli_vi": {
"acc": 0.3848303393213573,
"acc_stderr": 0.0068747449435859675
},
"xnli_en": {
"acc": 0.48283433133732534,
"acc_stderr": 0.0070605478229289675
}
},
"versions": {
"xnli_de": 0,
"xnli_sw": 0,
"xnli_ar": 0,
"xnli_ru": 0,
"xnli_fr": 0,
"xnli_zh": 0,
"xnli_th": 0,
"xnli_el": 0,
"xnli_ur": 0,
"xnli_hi": 0,
"xnli_es": 0,
"xnli_tr": 0,
"xnli_bg": 0,
"xnli_vi": 0,
"xnli_en": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-564M,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xstory_cloze_ar": {
"acc": 0.500992720052945,
"acc_stderr": 0.012867099955422925
},
"xstory_cloze_id": {
"acc": 0.5400397088021178,
"acc_stderr": 0.012825802370083988
},
"xstory_cloze_sw": {
"acc": 0.5307743216412971,
"acc_stderr": 0.01284273034058578
},
"xstory_cloze_en": {
"acc": 0.6055592322964924,
"acc_stderr": 0.012577106513936133
},
"xstory_cloze_te": {
"acc": 0.5585704831237591,
"acc_stderr": 0.012778538985880637
},
"xstory_cloze_zh": {
"acc": 0.5327597617471873,
"acc_stderr": 0.012839477563855915
},
"xstory_cloze_my": {
"acc": 0.514890800794176,
"acc_stderr": 0.012861417842074004
},
"xstory_cloze_hi": {
"acc": 0.5228325612177366,
"acc_stderr": 0.01285370238487085
},
"xstory_cloze_ru": {
"acc": 0.5618795499669094,
"acc_stderr": 0.01276820661627776
},
"xstory_cloze_es": {
"acc": 0.5506287227001986,
"acc_stderr": 0.012800991591293383
},
"xstory_cloze_eu": {
"acc": 0.5314361350099271,
"acc_stderr": 0.012841668760976905
}
},
"versions": {
"xstory_cloze_ar": 0,
"xstory_cloze_id": 0,
"xstory_cloze_sw": 0,
"xstory_cloze_en": 0,
"xstory_cloze_te": 0,
"xstory_cloze_zh": 0,
"xstory_cloze_my": 0,
"xstory_cloze_hi": 0,
"xstory_cloze_ru": 0,
"xstory_cloze_es": 0,
"xstory_cloze_eu": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-564M,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xwinograd_pt": {
"acc": 0.5855513307984791,
"acc_stderr": 0.030434573161228055
},
"xwinograd_zh": {
"acc": 0.6567460317460317,
"acc_stderr": 0.0211700809891982
},
"xwinograd_ru": {
"acc": 0.5904761904761905,
"acc_stderr": 0.02775082824017435
},
"xwinograd_fr": {
"acc": 0.5783132530120482,
"acc_stderr": 0.054534284852951115
},
"xwinograd_en": {
"acc": 0.6262365591397849,
"acc_stderr": 0.01003574358830904
},
"xwinograd_jp": {
"acc": 0.5453597497393118,
"acc_stderr": 0.01608765437474968
}
},
"versions": {
"xwinograd_pt": 0,
"xwinograd_zh": 0,
"xwinograd_ru": 0,
"xwinograd_fr": 0,
"xwinograd_en": 0,
"xwinograd_jp": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-564M,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
# xglm-7.5B
## xglm-7.5B_common_sense_reasoning_0-shot.json
| Task |Version| Metric |Value| |Stderr|
|-------------|------:|--------|----:|---|-----:|
|arc_challenge| 0|acc |28.75|± | 1.32|
| | |acc_norm|31.91|± | 1.36|
|arc_easy | 0|acc |62.37|± | 0.99|
| | |acc_norm|58.63|± | 1.01|
|boolq | 1|acc |60.18|± | 0.86|
|copa | 0|acc |79.00|± | 4.09|
|hellaswag | 0|acc |45.69|± | 0.50|
| | |acc_norm|61.23|± | 0.49|
|mc_taco | 0|em |13.81| | |
| | |f1 |47.92| | |
|openbookqa | 0|acc |25.40|± | 1.95|
| | |acc_norm|35.80|± | 2.15|
|piqa | 0|acc |73.94|± | 1.02|
| | |acc_norm|74.92|± | 1.01|
|prost | 0|acc |25.89|± | 0.32|
| | |acc_norm|26.36|± | 0.32|
|swag | 0|acc |50.51|± | 0.35|
| | |acc_norm|69.23|± | 0.33|
|winogrande | 0|acc |57.85|± | 1.39|
|wsc273 | 0|acc |75.82|± | 2.60|
## xglm-7.5B_gsm8k_8-shot.json
|Task |Version|Metric|Value| |Stderr|
|-----|------:|------|----:|---|-----:|
|gsm8k| 0|acc | 0.15|± | 0.11|
## xglm-7.5B_mathematical_reasoning_few_shot_5-shot.json
| Task |Version| Metric |Value| |Stderr|
|-------------------------|------:|--------|----:|---|-----:|
|drop | 1|em | 5.42|± | 0.23|
| | |f1 | 8.96|± | 0.26|
|gsm8k | 0|acc | 0.23|± | 0.13|
|math_algebra | 1|acc | 0.00|± | 0.00|
|math_counting_and_prob | 1|acc | 0.00|± | 0.00|
|math_geometry | 1|acc | 0.00|± | 0.00|
|math_intermediate_algebra| 1|acc | 0.00|± | 0.00|
|math_num_theory | 1|acc | 0.00|± | 0.00|
|math_prealgebra | 1|acc | 0.00|± | 0.00|
|math_precalc | 1|acc | 0.00|± | 0.00|
|mathqa | 0|acc |23.99|± | 0.78|
| | |acc_norm|23.52|± | 0.78|
## xglm-7.5B_pawsx_0-shot.json
| Task |Version|Metric|Value| |Stderr|
|--------|------:|------|----:|---|-----:|
|pawsx_de| 0|acc |55.90|± | 1.11|
|pawsx_en| 0|acc |58.85|± | 1.10|
|pawsx_es| 0|acc |52.80|± | 1.12|
|pawsx_fr| 0|acc |51.80|± | 1.12|
|pawsx_ja| 0|acc |52.00|± | 1.12|
|pawsx_ko| 0|acc |45.95|± | 1.11|
|pawsx_zh| 0|acc |51.30|± | 1.12|
## xglm-7.5B_xcopa_0-shot.json
| Task |Version|Metric|Value| |Stderr|
|--------|------:|------|----:|---|-----:|
|xcopa_et| 0|acc | 61.2|± | 2.18|
|xcopa_ht| 0|acc | 57.4|± | 2.21|
|xcopa_id| 0|acc | 69.4|± | 2.06|
|xcopa_it| 0|acc | 63.6|± | 2.15|
|xcopa_qu| 0|acc | 48.8|± | 2.24|
|xcopa_sw| 0|acc | 60.0|± | 2.19|
|xcopa_ta| 0|acc | 54.4|± | 2.23|
|xcopa_th| 0|acc | 59.4|± | 2.20|
|xcopa_tr| 0|acc | 58.4|± | 2.21|
|xcopa_vi| 0|acc | 70.2|± | 2.05|
|xcopa_zh| 0|acc | 63.8|± | 2.15|
## xglm-7.5B_xnli_0-shot.json
| Task |Version|Metric|Value| |Stderr|
|-------|------:|------|----:|---|-----:|
|xnli_ar| 0|acc |33.37|± | 0.67|
|xnli_bg| 0|acc |44.89|± | 0.70|
|xnli_de| 0|acc |48.98|± | 0.71|
|xnli_el| 0|acc |40.66|± | 0.69|
|xnli_en| 0|acc |53.85|± | 0.70|
|xnli_es| 0|acc |47.70|± | 0.71|
|xnli_fr| 0|acc |46.95|± | 0.71|
|xnli_hi| 0|acc |47.21|± | 0.71|
|xnli_ru| 0|acc |46.33|± | 0.70|
|xnli_sw| 0|acc |45.83|± | 0.70|
|xnli_th| 0|acc |43.71|± | 0.70|
|xnli_tr| 0|acc |46.27|± | 0.70|
|xnli_ur| 0|acc |42.10|± | 0.70|
|xnli_vi| 0|acc |46.33|± | 0.70|
|xnli_zh| 0|acc |35.37|± | 0.68|
## xglm-7.5B_xstory_cloze_0-shot.json
| Task |Version|Metric|Value| |Stderr|
|---------------|------:|------|----:|---|-----:|
|xstory_cloze_ar| 0|acc |56.19|± | 1.28|
|xstory_cloze_en| 0|acc |69.82|± | 1.18|
|xstory_cloze_es| 0|acc |64.06|± | 1.23|
|xstory_cloze_eu| 0|acc |57.71|± | 1.27|
|xstory_cloze_hi| 0|acc |58.77|± | 1.27|
|xstory_cloze_id| 0|acc |62.94|± | 1.24|
|xstory_cloze_my| 0|acc |57.11|± | 1.27|
|xstory_cloze_ru| 0|acc |63.53|± | 1.24|
|xstory_cloze_sw| 0|acc |59.30|± | 1.26|
|xstory_cloze_te| 0|acc |60.23|± | 1.26|
|xstory_cloze_zh| 0|acc |58.90|± | 1.27|
## xglm-7.5B_xwinograd_0-shot.json
| Task |Version|Metric|Value| |Stderr|
|------------|------:|------|----:|---|-----:|
|xwinograd_en| 0|acc |79.48|± | 0.84|
|xwinograd_fr| 0|acc |65.06|± | 5.27|
|xwinograd_jp| 0|acc |64.96|± | 1.54|
|xwinograd_pt| 0|acc |67.30|± | 2.90|
|xwinograd_ru| 0|acc |63.17|± | 2.72|
|xwinograd_zh| 0|acc |72.82|± | 1.98|
{
"results": {
"openbookqa": {
"acc": 0.254,
"acc_stderr": 0.019486596801643385,
"acc_norm": 0.358,
"acc_norm_stderr": 0.02146143486285912
},
"winogrande": {
"acc": 0.5785319652722968,
"acc_stderr": 0.0138780723774976
},
"arc_easy": {
"acc": 0.6237373737373737,
"acc_stderr": 0.009940646221513789,
"acc_norm": 0.5862794612794613,
"acc_norm_stderr": 0.010105878530238135
},
"copa": {
"acc": 0.79,
"acc_stderr": 0.040936018074033256
},
"mc_taco": {
"em": 0.13813813813813813,
"f1": 0.479152974631639
},
"wsc273": {
"acc": 0.7582417582417582,
"acc_stderr": 0.02596031999685269
},
"hellaswag": {
"acc": 0.45688109938259314,
"acc_stderr": 0.004971192387202445,
"acc_norm": 0.6123282214698267,
"acc_norm_stderr": 0.004862232790041574
},
"boolq": {
"acc": 0.6018348623853211,
"acc_stderr": 0.008561755594317445
},
"swag": {
"acc": 0.505148455463361,
"acc_stderr": 0.003534904635576977,
"acc_norm": 0.692292312306308,
"acc_norm_stderr": 0.003263207195550976
},
"piqa": {
"acc": 0.7393906420021763,
"acc_stderr": 0.010241826155811627,
"acc_norm": 0.749183895538629,
"acc_norm_stderr": 0.010113869547069046
},
"prost": {
"acc": 0.2588599487617421,
"acc_stderr": 0.0032000423309913543,
"acc_norm": 0.26361016225448336,
"acc_norm_stderr": 0.0032189046983713983
},
"arc_challenge": {
"acc": 0.28754266211604096,
"acc_stderr": 0.013226719056266129,
"acc_norm": 0.3191126279863481,
"acc_norm_stderr": 0.013621696119173304
}
},
"versions": {
"openbookqa": 0,
"winogrande": 0,
"arc_easy": 0,
"copa": 0,
"mc_taco": 0,
"wsc273": 0,
"hellaswag": 0,
"boolq": 1,
"swag": 0,
"piqa": 0,
"prost": 0,
"arc_challenge": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda:0",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"gsm8k": {
"acc": 0.001516300227445034,
"acc_stderr": 0.0010717793485492655
}
},
"versions": {
"gsm8k": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 8,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"math_num_theory": {
"acc": 0.0,
"acc_stderr": 0.0
},
"gsm8k": {
"acc": 0.002274450341167551,
"acc_stderr": 0.0013121578148674316
},
"math_geometry": {
"acc": 0.0,
"acc_stderr": 0.0
},
"drop": {
"em": 0.05421560402684564,
"em_stderr": 0.002318984649948223,
"f1": 0.08962458053691245,
"f1_stderr": 0.0026401926224488034
},
"math_prealgebra": {
"acc": 0.0,
"acc_stderr": 0.0
},
"math_counting_and_prob": {
"acc": 0.0,
"acc_stderr": 0.0
},
"math_precalc": {
"acc": 0.0,
"acc_stderr": 0.0
},
"math_intermediate_algebra": {
"acc": 0.0,
"acc_stderr": 0.0
},
"math_algebra": {
"acc": 0.0,
"acc_stderr": 0.0
},
"mathqa": {
"acc": 0.23986599664991626,
"acc_stderr": 0.007816818250028128,
"acc_norm": 0.23517587939698492,
"acc_norm_stderr": 0.0077638612776946255
}
},
"versions": {
"math_num_theory": 1,
"gsm8k": 0,
"math_geometry": 1,
"drop": 1,
"math_prealgebra": 1,
"math_counting_and_prob": 1,
"math_precalc": 1,
"math_intermediate_algebra": 1,
"math_algebra": 1,
"mathqa": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 5,
"batch_size": "auto",
"device": "cuda:0",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"pawsx_en": {
"acc": 0.5885,
"acc_stderr": 0.011006563824537298
},
"pawsx_es": {
"acc": 0.528,
"acc_stderr": 0.011165587094621537
},
"pawsx_fr": {
"acc": 0.518,
"acc_stderr": 0.011175886999478619
},
"pawsx_zh": {
"acc": 0.513,
"acc_stderr": 0.01117935548207038
},
"pawsx_ja": {
"acc": 0.52,
"acc_stderr": 0.011174185930778312
},
"pawsx_de": {
"acc": 0.559,
"acc_stderr": 0.011105006104468736
},
"pawsx_ko": {
"acc": 0.4595,
"acc_stderr": 0.011146389370464362
}
},
"versions": {
"pawsx_en": 0,
"pawsx_es": 0,
"pawsx_fr": 0,
"pawsx_zh": 0,
"pawsx_ja": 0,
"pawsx_de": 0,
"pawsx_ko": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xcopa_et": {
"acc": 0.612,
"acc_stderr": 0.021814300984787635
},
"xcopa_th": {
"acc": 0.594,
"acc_stderr": 0.02198396209008634
},
"xcopa_qu": {
"acc": 0.488,
"acc_stderr": 0.02237662679792717
},
"xcopa_ta": {
"acc": 0.544,
"acc_stderr": 0.02229623834840705
},
"xcopa_zh": {
"acc": 0.638,
"acc_stderr": 0.0215136625275824
},
"xcopa_vi": {
"acc": 0.702,
"acc_stderr": 0.02047511809298897
},
"xcopa_sw": {
"acc": 0.6,
"acc_stderr": 0.021930844120728505
},
"xcopa_it": {
"acc": 0.636,
"acc_stderr": 0.021539170637317685
},
"xcopa_tr": {
"acc": 0.584,
"acc_stderr": 0.022064943313928848
},
"xcopa_id": {
"acc": 0.694,
"acc_stderr": 0.0206295699983454
},
"xcopa_ht": {
"acc": 0.574,
"acc_stderr": 0.022136577335085637
}
},
"versions": {
"xcopa_et": 0,
"xcopa_th": 0,
"xcopa_qu": 0,
"xcopa_ta": 0,
"xcopa_zh": 0,
"xcopa_vi": 0,
"xcopa_sw": 0,
"xcopa_it": 0,
"xcopa_tr": 0,
"xcopa_id": 0,
"xcopa_ht": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xnli_ar": {
"acc": 0.3337325349301397,
"acc_stderr": 0.00666266628252267
},
"xnli_bg": {
"acc": 0.4489021956087824,
"acc_stderr": 0.007027723874210379
},
"xnli_de": {
"acc": 0.48982035928143713,
"acc_stderr": 0.0070632481147059134
},
"xnli_el": {
"acc": 0.40658682634730536,
"acc_stderr": 0.006940323712177368
},
"xnli_en": {
"acc": 0.5385229540918164,
"acc_stderr": 0.0070437128985425335
},
"xnli_es": {
"acc": 0.47704590818363274,
"acc_stderr": 0.007057263845316342
},
"xnli_fr": {
"acc": 0.4694610778443114,
"acc_stderr": 0.007051522651006734
},
"xnli_hi": {
"acc": 0.4720558882235529,
"acc_stderr": 0.007053670508441103
},
"xnli_ru": {
"acc": 0.46327345309381235,
"acc_stderr": 0.007045628330322907
},
"xnli_sw": {
"acc": 0.45828343313373254,
"acc_stderr": 0.007040080446339805
},
"xnli_th": {
"acc": 0.437125748502994,
"acc_stderr": 0.007008633817895695
},
"xnli_tr": {
"acc": 0.4626746506986028,
"acc_stderr": 0.007045000071900887
},
"xnli_ur": {
"acc": 0.42095808383233535,
"acc_stderr": 0.006975878576227385
},
"xnli_vi": {
"acc": 0.46327345309381235,
"acc_stderr": 0.007045628330322896
},
"xnli_zh": {
"acc": 0.3536926147704591,
"acc_stderr": 0.006755492859492898
}
},
"versions": {
"xnli_ar": 0,
"xnli_bg": 0,
"xnli_de": 0,
"xnli_el": 0,
"xnli_en": 0,
"xnli_es": 0,
"xnli_fr": 0,
"xnli_hi": 0,
"xnli_ru": 0,
"xnli_sw": 0,
"xnli_th": 0,
"xnli_tr": 0,
"xnli_ur": 0,
"xnli_vi": 0,
"xnli_zh": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xstory_cloze_es": {
"acc": 0.6406353408338848,
"acc_stderr": 0.012347659802101675
},
"xstory_cloze_zh": {
"acc": 0.5890138980807412,
"acc_stderr": 0.012661578894368948
},
"xstory_cloze_sw": {
"acc": 0.5929847782925215,
"acc_stderr": 0.012642664836816926
},
"xstory_cloze_en": {
"acc": 0.6982131039046989,
"acc_stderr": 0.011812877848905303
},
"xstory_cloze_hi": {
"acc": 0.5876902713434812,
"acc_stderr": 0.012667694122397068
},
"xstory_cloze_ar": {
"acc": 0.5618795499669094,
"acc_stderr": 0.012768206616277757
},
"xstory_cloze_eu": {
"acc": 0.5771012574454004,
"acc_stderr": 0.0127132250091262
},
"xstory_cloze_id": {
"acc": 0.6293845135671741,
"acc_stderr": 0.012428861084065903
},
"xstory_cloze_ru": {
"acc": 0.6353408338848445,
"acc_stderr": 0.012386781532906161
},
"xstory_cloze_te": {
"acc": 0.6022501654533422,
"acc_stderr": 0.012595197856703525
},
"xstory_cloze_my": {
"acc": 0.57114493712773,
"acc_stderr": 0.01273620271314778
}
},
"versions": {
"xstory_cloze_es": 0,
"xstory_cloze_zh": 0,
"xstory_cloze_sw": 0,
"xstory_cloze_en": 0,
"xstory_cloze_hi": 0,
"xstory_cloze_ar": 0,
"xstory_cloze_eu": 0,
"xstory_cloze_id": 0,
"xstory_cloze_ru": 0,
"xstory_cloze_te": 0,
"xstory_cloze_my": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
{
"results": {
"xwinograd_zh": {
"acc": 0.7281746031746031,
"acc_stderr": 0.01983712759311063
},
"xwinograd_ru": {
"acc": 0.6317460317460317,
"acc_stderr": 0.027219500732466696
},
"xwinograd_pt": {
"acc": 0.6730038022813688,
"acc_stderr": 0.028982074243683254
},
"xwinograd_en": {
"acc": 0.7948387096774193,
"acc_stderr": 0.008376626547826555
},
"xwinograd_jp": {
"acc": 0.6496350364963503,
"acc_stderr": 0.01541389159576608
},
"xwinograd_fr": {
"acc": 0.6506024096385542,
"acc_stderr": 0.05265151356440471
}
},
"versions": {
"xwinograd_zh": 0,
"xwinograd_ru": 0,
"xwinograd_pt": 0,
"xwinograd_en": 0,
"xwinograd_jp": 0,
"xwinograd_fr": 0
},
"config": {
"model": "hf-causal-experimental",
"model_args": "pretrained=facebook/xglm-7.5B,use_accelerate=True",
"num_fewshot": 0,
"batch_size": "auto",
"device": "cuda",
"no_cache": true,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
@@ -26,7 +26,7 @@ class DryrunLM(LM):
     def greedy_until(self, requests):
         res = []
-        for ctx, until in requests:
+        for ctx, _ in requests:
             res.append("lol")
             # assume worst case - generates until 256
...
"""
Usage:
python make_table_tasks.py --output <markdown_filename>
"""
import logging
from pytablewriter import MarkdownTableWriter, LatexTableWriter
import os
import json
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def make_table(result_dict):
"""Generate table of results."""
md_writer = MarkdownTableWriter()
latex_writer = LatexTableWriter()
md_writer.headers = ["Task", "Version", "Metric", "Value", "", "Stderr"]
latex_writer.headers = ["Task", "Version", "Metric", "Value", "", "Stderr"]
values = []
for k, dic in sorted(result_dict["results"].items()):
version = result_dict["versions"][k]
percent = k == "squad2"
for m, v in dic.items():
if m.endswith("_stderr"):
continue
if m + "_stderr" in dic:
se = dic[m + "_stderr"]
if percent or m == "ppl":
values.append([k, version, m, "%.2f" % v, "±", "%.2f" % se])
else:
values.append(
[k, version, m, "%.2f" % (v * 100), "±", "%.2f" % (se * 100)]
)
else:
if percent or m == "ppl":
values.append([k, version, m, "%.2f" % v, "", ""])
else:
values.append([k, version, m, "%.2f" % (v * 100), "", ""])
k = ""
version = ""
md_writer.value_matrix = values
latex_writer.value_matrix = values
# todo: make latex table look good
# print(latex_writer.dumps())
return md_writer.dumps()
if __name__ == "__main__":
# loop dirs and subdirs in results dir
# for each dir, load json files
for dirpath, dirnames, filenames in os.walk("../results"):
# skip dirs without files
if not filenames:
continue
path_readme = os.path.join(dirpath, "README.md")
with open(path_readme, "w") as f:
# get path name, only last folder
path_name = dirpath.split("/")[-1]
f.write(f"# {path_name} \n\n")
for filename in sorted([f for f in filenames if f.endswith(".json")]):
path = os.path.join(dirpath, filename)
with open(path, "r") as f:
result_dict = json.load(f)
with open(path_readme, "a") as f:
f.write(f"## {filename} \n")
f.write(f"{make_table(result_dict)} \n")
import argparse
import json
import os
import subprocess
import time
from pathlib import Path
from lm_eval import evaluator, utils
from lm_eval.api.registry import ALL_TASKS
seq2seq_models = ["google/flan-t5-small"]
causal_models = [
"gpt2",
"facebook/opt-125m",
"EleutherAI/gpt-neo-125m",
"EleutherAI/pythia-160m",
]
model_names = seq2seq_models + causal_models
completion_tasks = ["boolq", "lambada_openai", "winogrande"]
choice_tasks = ["hellaswag", "openbookqa", "piqa"]
perplexity_tasks = ["wikitext"]
generation_tasks = []
task_names = completion_tasks + choice_tasks + perplexity_tasks + generation_tasks
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--branches", default=[])
parser.add_argument("--models", default=model_names)
parser.add_argument("--tasks", default=task_names)
parser.add_argument("--acc_norm", type=bool, default=False)
parser.add_argument("--perplexity", default=None)
# TODO: implement num_fewshot and limit per task, e.g. task1:5,task2:1:100,task3::1000
parser.add_argument("--num_fewshot", type=int, default=0)
parser.add_argument("--limit", type=float, default=None)
# TODO: implement hf-auto to pick between causal and seq2seq models so we don't need this
parser.add_argument("--model", default="hf-causal")
# Use whatever is faster here
parser.add_argument("--model_args", default="use_accelerate=True,load_in_8bit=True")
parser.add_argument("--batch_size", default="auto")
return parser.parse_args()
def eval_models(args, branch=None):
if branch is not None:
if os.system(f"git checkout {branch}") != 0:
return {}, 0
branch = branch or initial_branch
start_time = time.time()
results = {}
for model in args.models:
model_type = (
"hf-causal"
if model in causal_models
else "hf-seq2seq"
if model in seq2seq_models
else args.model
)
model_args = f"pretrained={model},{args.model_args}"
        # TODO: split_and_pad_windows in AutoSeq2SeqLM doesn't exist, #527
tasks = (
args.tasks
if model in causal_models or model_type == "hf-causal"
else list(filter(lambda task: task not in perplexity_tasks, args.tasks))
)
# TODO: OOM with auto for seq2seq models, also can OOM with llama
batch_size = (
args.batch_size
if model in causal_models or model_type == "hf-causal"
else 64
if args.batch_size == "auto"
else args.batch_size
)
output_path = (
f"data/regression/{int(start_time)}-{branch}-{Path(model).name}.json"
)
command = (
f"python3 main.py --model {model_type} --model_args {model_args} --tasks {','.join(tasks)} "
f"--num_fewshot {args.num_fewshot}{'' if args.limit is None else f' --limit {args.limit}'} "
f"--batch_size {batch_size} --no_cache --output_path {output_path}"
)
print(
f"{'=' * 80}\nEvaluating {model} on {', '.join(tasks)} at {branch} with:\n\n{command}\n{'=' * 80}"
)
ret = os.system(command)
results[model] = json.load(open(output_path)) if ret == 0 else {"results": {}}
end_time = time.time()
return results, end_time - start_time
def extract_value(args, results, model, task, err=False):
if model not in results:
return 0
results = results[model]["results"]
if task not in results:
return 0
results = results[task]
if args.acc_norm and "acc_norm,none" in results:
return results["acc_norm,none"] if not err else results["acc_norm_stderr,none"]
if "acc,none" in results:
return results["acc,none"] if not err else results["acc_stderr,none"]
if (args.perplexity or "word_perplexity") + ",none" in results:
return (
results[(args.perplexity or "word_perplexity") + ",none"] if not err else 0
)
return 0
def format_value(args, results, model, task):
val = 100 * extract_value(args, results, model, task)
err = 100 * extract_value(args, results, model, task, err=True)
return f"{val:.2f}{f' ± {err:.2f}' if err != 0 else ''}"
def format_diff(args, results1, results2, model, task):
val1 = 100 * extract_value(args, results1, model, task)
val2 = 100 * extract_value(args, results2, model, task)
diff = val2 - val1
return f"**+{diff:.2f}**" if diff > 0 else f"{diff:.2f}"
def main():
args = parse_args()
args.branches = (
args.branches.split(",") if type(args.branches) == str else args.branches
)
args.models = args.models.split(",") if type(args.models) == str else args.models
args.tasks = (
ALL_TASKS
if args.tasks == "all_tasks"
else utils.pattern_match(args.tasks.split(","), ALL_TASKS)
if type(args.tasks) == str
else args.tasks
)
global initial_branch
initial_branch = (
subprocess.check_output("git branch --show-current", shell=True)
.decode("ascii")
.strip()
)
# TODO: implement proper timing for each task
# TODO: reduce IO by sharing tasks between models?
results, runtime = eval_models(args)
print(results, runtime)
runs = []
for branch in args.branches:
runs.append((branch, *eval_models(args, branch)))
os.system(f"git checkout {initial_branch}")
print("")
print(f"|task|{'|'.join(map(lambda model: Path(model).name, args.models))}|")
print(f"|--|{'--|' * len(args.models)}")
for task in args.tasks:
print(
f"|{task} ({initial_branch})|{'|'.join(map(lambda model: format_value(args, results, model, task), args.models))}|"
)
for branch, branch_results, branch_runtime in runs:
print(
f"|{task} ({branch})|{'|'.join(map(lambda model: format_value(args, branch_results, model, task), args.models))}|"
)
print(
f"|{task} (diff)|{'|'.join(map(lambda model: format_diff(args, results, branch_results, model, task), args.models))}|"
)
print("")
print("|branch|runtime|%|")
print("|--|--|--|")
print(f"|{initial_branch}|{runtime:.1f}s|100%|")
for branch, _, branch_runtime in runs:
print(f"|{branch}|{branch_runtime:.1f}s|{100 * branch_runtime / runtime:.2f}%|")
if __name__ == "__main__":
main()
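For reference, a minimal sketch of the results shape that `extract_value` and `format_value` operate on, using the `"acc,none"`-style metric keys the script looks up; the module name and the example numbers (echoing the boolq entry above) are assumptions for illustration only.

```python
from argparse import Namespace

# Import path is hypothetical; both helpers are defined in the regression
# script above.
from regression import extract_value, format_value

args = Namespace(acc_norm=False, perplexity=None)
results = {
    "gpt2": {
        "results": {
            "boolq": {"acc,none": 0.6018, "acc_stderr,none": 0.0086},
        }
    }
}

print(extract_value(args, results, "gpt2", "boolq"))  # 0.6018
print(format_value(args, results, "gpt2", "boolq"))   # "60.18 ± 0.86"
```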
@@ -32,10 +32,10 @@ def main():
     task_names = args.tasks.split(",")
     task_dict = tasks.get_task_dict(task_names)

-    description_dict = {}
-    if args.description_dict_path:
-        with open(args.description_dict_path, "r") as f:
-            description_dict = json.load(f)
+    # description_dict = {}
+    # if args.description_dict_path:
+    #     with open(args.description_dict_path, "r") as f:
+    #         description_dict = json.load(f)

     os.makedirs(args.output_base_path, exist_ok=True)
     for task_name, task in task_dict.items():
@@ -55,11 +55,11 @@ def main():
         docs = join_iters(iters)

-        description = (
-            description_dict[task_name]
-            if description_dict and task_name in description_dict
-            else ""
-        )
+        # description = (
+        #     description_dict[task_name]
+        #     if description_dict and task_name in description_dict
+        #     else ""
+        # )

         with open(os.path.join(args.output_base_path, task_name), "w") as f:
             for i, doc in (
@@ -72,7 +72,7 @@ def main():
                 doc=doc,
                 num_fewshot=args.num_fewshot,
                 rnd=rnd,
-                description=description,
+                # description=description,
             )
             f.write(ctx + "\n")
...
@@ -19,7 +19,7 @@ setuptools.setup(
         "License :: OSI Approved :: MIT License",
         "Operating System :: OS Independent",
     ],
-    python_requires=">=3.6",
+    python_requires=">=3.9",
     install_requires=[
         "accelerate>=0.18.0",
         "datasets>=2.0.0",
@@ -47,5 +47,7 @@ setuptools.setup(
         "promptsource": [
             "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
         ],
+        "auto-gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"],
+        "anthropic": ["anthropic"],
     },
 )
# Task-name
### Paper
Title: `paper title goes here`
Abstract: `link to paper PDF or arXiv abstract goes here`
`Short description of paper / benchmark goes here:`
Homepage: `homepage to the benchmark's website goes here, if applicable`
### Citation
```
BibTeX-formatted citation goes here
```
### Subtasks
List or describe tasks defined in this folder, and their names here:
* `task_name`: `1-sentence description of what this particular task does`
* `task_name2`: .....
### Checklist
For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?