"magic_pdf/vscode:/vscode.git/clone" did not exist on "09269c845eeef745f44d93acf567e89a8a2d1871"
Commit baa8b0d3 authored by bzantium's avatar bzantium
Browse files

fix for merge from master

parent a956bc63
...@@ -61,7 +61,7 @@ _EMPTY_ADDITIONAL_ANSWER = { ...@@ -61,7 +61,7 @@ _EMPTY_ADDITIONAL_ANSWER = {
"span_end": -1, "span_end": -1,
"span_text": "", "span_text": "",
"input_text": "", "input_text": "",
"turn_id": -1 "turn_id": -1,
} }
], ],
"1": [ "1": [
...@@ -70,7 +70,7 @@ _EMPTY_ADDITIONAL_ANSWER = { ...@@ -70,7 +70,7 @@ _EMPTY_ADDITIONAL_ANSWER = {
"span_end": -1, "span_end": -1,
"span_text": "", "span_text": "",
"input_text": "", "input_text": "",
"turn_id": -1 "turn_id": -1,
} }
], ],
"2": [ "2": [
...@@ -79,7 +79,7 @@ _EMPTY_ADDITIONAL_ANSWER = { ...@@ -79,7 +79,7 @@ _EMPTY_ADDITIONAL_ANSWER = {
"span_end": -1, "span_end": -1,
"span_text": "", "span_text": "",
"input_text": "", "input_text": "",
"turn_id": -1 "turn_id": -1,
} }
], ],
} }
...@@ -91,8 +91,9 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -91,8 +91,9 @@ class Coqa(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.0.1") VERSION = datasets.Version("0.0.1")
BUILDER_CONFIGS = [ BUILDER_CONFIGS = [
datasets.BuilderConfig(name="coqa", version=VERSION, datasets.BuilderConfig(
description="The CoQA dataset."), name="coqa", version=VERSION, description="The CoQA dataset."
),
] ]
def _info(self): def _info(self):
...@@ -101,41 +102,52 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -101,41 +102,52 @@ class Coqa(datasets.GeneratorBasedBuilder):
"id": datasets.Value("string"), "id": datasets.Value("string"),
"source": datasets.Value("string"), "source": datasets.Value("string"),
"story": datasets.Value("string"), "story": datasets.Value("string"),
"questions": datasets.features.Sequence({ "questions": datasets.features.Sequence(
{
"input_text": datasets.Value("string"), "input_text": datasets.Value("string"),
"turn_id": datasets.Value("int32"), "turn_id": datasets.Value("int32"),
}), }
"answers": datasets.features.Sequence({ ),
"answers": datasets.features.Sequence(
{
"span_start": datasets.Value("int32"), "span_start": datasets.Value("int32"),
"span_end": datasets.Value("int32"), "span_end": datasets.Value("int32"),
"span_text": datasets.Value("string"), "span_text": datasets.Value("string"),
"input_text": datasets.Value("string"), "input_text": datasets.Value("string"),
"turn_id": datasets.Value("int32"), "turn_id": datasets.Value("int32"),
}), }
),
"additional_answers": { "additional_answers": {
"0": datasets.features.Sequence({ "0": datasets.features.Sequence(
{
"span_start": datasets.Value("int32"), "span_start": datasets.Value("int32"),
"span_end": datasets.Value("int32"), "span_end": datasets.Value("int32"),
"span_text": datasets.Value("string"), "span_text": datasets.Value("string"),
"input_text": datasets.Value("string"), "input_text": datasets.Value("string"),
"turn_id": datasets.Value("int32"), "turn_id": datasets.Value("int32"),
}), }
"1": datasets.features.Sequence({ ),
"1": datasets.features.Sequence(
{
"span_start": datasets.Value("int32"), "span_start": datasets.Value("int32"),
"span_end": datasets.Value("int32"), "span_end": datasets.Value("int32"),
"span_text": datasets.Value("string"), "span_text": datasets.Value("string"),
"input_text": datasets.Value("string"), "input_text": datasets.Value("string"),
"turn_id": datasets.Value("int32"), "turn_id": datasets.Value("int32"),
}), }
"2": datasets.features.Sequence({ ),
"2": datasets.features.Sequence(
{
"span_start": datasets.Value("int32"), "span_start": datasets.Value("int32"),
"span_end": datasets.Value("int32"), "span_end": datasets.Value("int32"),
"span_text": datasets.Value("string"), "span_text": datasets.Value("string"),
"input_text": datasets.Value("string"), "input_text": datasets.Value("string"),
"turn_id": datasets.Value("int32"), "turn_id": datasets.Value("int32"),
}),
} }
}) ),
},
}
)
return datasets.DatasetInfo( return datasets.DatasetInfo(
description=_DESCRIPTION, description=_DESCRIPTION,
features=features, features=features,
...@@ -175,10 +187,7 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -175,10 +187,7 @@ class Coqa(datasets.GeneratorBasedBuilder):
source = row["source"] source = row["source"]
story = row["story"] story = row["story"]
questions = [ questions = [
{ {"input_text": q["input_text"], "turn_id": q["turn_id"]}
"input_text": q["input_text"],
"turn_id": q["turn_id"]
}
for q in row["questions"] for q in row["questions"]
] ]
answers = [ answers = [
...@@ -187,7 +196,7 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -187,7 +196,7 @@ class Coqa(datasets.GeneratorBasedBuilder):
"span_end": a["span_end"], "span_end": a["span_end"],
"span_text": a["span_text"], "span_text": a["span_text"],
"input_text": a["input_text"], "input_text": a["input_text"],
"turn_id": a["turn_id"] "turn_id": a["turn_id"],
} }
for a in row["answers"] for a in row["answers"]
] ]
...@@ -201,7 +210,7 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -201,7 +210,7 @@ class Coqa(datasets.GeneratorBasedBuilder):
"span_end": a0["span_end"], "span_end": a0["span_end"],
"span_text": a0["span_text"], "span_text": a0["span_text"],
"input_text": a0["input_text"], "input_text": a0["input_text"],
"turn_id": a0["turn_id"] "turn_id": a0["turn_id"],
} }
for a0 in row["additional_answers"]["0"] for a0 in row["additional_answers"]["0"]
], ],
...@@ -211,7 +220,7 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -211,7 +220,7 @@ class Coqa(datasets.GeneratorBasedBuilder):
"span_end": a1["span_end"], "span_end": a1["span_end"],
"span_text": a1["span_text"], "span_text": a1["span_text"],
"input_text": a1["input_text"], "input_text": a1["input_text"],
"turn_id": a1["turn_id"] "turn_id": a1["turn_id"],
} }
for a1 in row["additional_answers"]["1"] for a1 in row["additional_answers"]["1"]
], ],
...@@ -221,7 +230,7 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -221,7 +230,7 @@ class Coqa(datasets.GeneratorBasedBuilder):
"span_end": a2["span_end"], "span_end": a2["span_end"],
"span_text": a2["span_text"], "span_text": a2["span_text"],
"input_text": a2["input_text"], "input_text": a2["input_text"],
"turn_id": a2["turn_id"] "turn_id": a2["turn_id"],
} }
for a2 in row["additional_answers"]["2"] for a2 in row["additional_answers"]["2"]
], ],
...@@ -232,5 +241,5 @@ class Coqa(datasets.GeneratorBasedBuilder): ...@@ -232,5 +241,5 @@ class Coqa(datasets.GeneratorBasedBuilder):
"source": source, "source": source,
"questions": questions, "questions": questions,
"answers": answers, "answers": answers,
"additional_answers": additional_answers "additional_answers": additional_answers,
} }
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
# Custom DROP dataet that, unlike HF, keeps all question-answer pairs # Custom DROP dataset that, unlike HF, keeps all question-answer pairs
# even if there are multiple types of answers for the same question. # even if there are multiple types of answers for the same question.
"""DROP dataset.""" """DROP dataset."""
...@@ -50,7 +50,8 @@ _URLS = { ...@@ -50,7 +50,8 @@ _URLS = {
"drop": "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip", "drop": "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip",
} }
_EMPTY_VALIDATED_ANSWER = [{ _EMPTY_VALIDATED_ANSWER = [
{
"number": "", "number": "",
"date": { "date": {
"day": "", "day": "",
...@@ -59,8 +60,9 @@ _EMPTY_VALIDATED_ANSWER = [{ ...@@ -59,8 +60,9 @@ _EMPTY_VALIDATED_ANSWER = [{
}, },
"spans": [], "spans": [],
"worker_id": "", "worker_id": "",
"hit_id": "" "hit_id": "",
}] }
]
class Drop(datasets.GeneratorBasedBuilder): class Drop(datasets.GeneratorBasedBuilder):
...@@ -69,12 +71,14 @@ class Drop(datasets.GeneratorBasedBuilder): ...@@ -69,12 +71,14 @@ class Drop(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.0.1") VERSION = datasets.Version("0.0.1")
BUILDER_CONFIGS = [ BUILDER_CONFIGS = [
datasets.BuilderConfig(name="drop", version=VERSION, datasets.BuilderConfig(
description="The DROP dataset."), name="drop", version=VERSION, description="The DROP dataset."
),
] ]
def _info(self): def _info(self):
features = datasets.Features({ features = datasets.Features(
{
"section_id": datasets.Value("string"), "section_id": datasets.Value("string"),
"passage": datasets.Value("string"), "passage": datasets.Value("string"),
"question": datasets.Value("string"), "question": datasets.Value("string"),
...@@ -90,7 +94,8 @@ class Drop(datasets.GeneratorBasedBuilder): ...@@ -90,7 +94,8 @@ class Drop(datasets.GeneratorBasedBuilder):
"worker_id": datasets.Value("string"), "worker_id": datasets.Value("string"),
"hit_id": datasets.Value("string"), "hit_id": datasets.Value("string"),
}, },
"validated_answers": datasets.features.Sequence({ "validated_answers": datasets.features.Sequence(
{
"number": datasets.Value("string"), "number": datasets.Value("string"),
"date": { "date": {
"day": datasets.Value("string"), "day": datasets.Value("string"),
...@@ -100,8 +105,10 @@ class Drop(datasets.GeneratorBasedBuilder): ...@@ -100,8 +105,10 @@ class Drop(datasets.GeneratorBasedBuilder):
"spans": datasets.features.Sequence(datasets.Value("string")), "spans": datasets.features.Sequence(datasets.Value("string")),
"worker_id": datasets.Value("string"), "worker_id": datasets.Value("string"),
"hit_id": datasets.Value("string"), "hit_id": datasets.Value("string"),
}), }
}) ),
}
)
return datasets.DatasetInfo( return datasets.DatasetInfo(
description=_DESCRIPTION, description=_DESCRIPTION,
features=features, features=features,
...@@ -118,7 +125,9 @@ class Drop(datasets.GeneratorBasedBuilder): ...@@ -118,7 +125,9 @@ class Drop(datasets.GeneratorBasedBuilder):
name=datasets.Split.TRAIN, name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples # These kwargs will be passed to _generate_examples
gen_kwargs={ gen_kwargs={
"filepath": os.path.join(data_dir, "drop_dataset", "drop_dataset_train.json"), "filepath": os.path.join(
data_dir, "drop_dataset", "drop_dataset_train.json"
),
"split": "train", "split": "train",
}, },
), ),
...@@ -126,7 +135,9 @@ class Drop(datasets.GeneratorBasedBuilder): ...@@ -126,7 +135,9 @@ class Drop(datasets.GeneratorBasedBuilder):
name=datasets.Split.VALIDATION, name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples # These kwargs will be passed to _generate_examples
gen_kwargs={ gen_kwargs={
"filepath": os.path.join(data_dir, "drop_dataset", "drop_dataset_dev.json"), "filepath": os.path.join(
data_dir, "drop_dataset", "drop_dataset_dev.json"
),
"split": "validation", "split": "validation",
}, },
), ),
......
{"gsm8k": {"description": "State-of-the-art language models can match human performance on many tasks, but \nthey still struggle to robustly perform multi-step mathematical reasoning. To \ndiagnose the failures of current models and support research, we introduce GSM8K,\na dataset of 8.5K high quality linguistically diverse grade school math word problems.\nWe find that even the largest transformer models fail to achieve high test performance, \ndespite the conceptual simplicity of this problem distribution.\n", "citation": "@misc{cobbe2021training,\n title={Training Verifiers to Solve Math Word Problems},\n author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},\n year={2021},\n eprint={2110.14168},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n", "homepage": "https://github.com/openai/grade-school-math", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "gsm8_k", "config_name": "gsm8k", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 3963202, "num_examples": 7473, "dataset_name": "gsm8_k"}, "test": {"name": "test", "num_bytes": 713732, "num_examples": 1319, "dataset_name": "gsm8_k"}}, "download_checksums": {"https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/train.jsonl": {"num_bytes": 4166206, "checksum": "17f347dc51477c50d4efb83959dbb7c56297aba886e5544ee2aaed3024813465"}, "https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/test.jsonl": {"num_bytes": 749738, "checksum": "3730d312f6e3440559ace48831e51066acaca737f6eabec99bccb9e4b3c39d14"}}, "download_size": 4915944, "post_processing_size": null, "dataset_size": 
4676934, "size_in_bytes": 9592878}}
\ No newline at end of file
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Grade School Math 8k dataset."""
import json
import datasets
_CITATION = """\
@misc{cobbe2021training,
title={Training Verifiers to Solve Math Word Problems},
author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
year={2021},
eprint={2110.14168},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_DESCRIPTION = """\
State-of-the-art language models can match human performance on many tasks, but
they still struggle to robustly perform multi-step mathematical reasoning. To
diagnose the failures of current models and support research, we introduce GSM8K,
a dataset of 8.5K high quality linguistically diverse grade school math word problems.
We find that even the largest transformer models fail to achieve high test performance,
despite the conceptual simplicity of this problem distribution.
"""
_HOMEPAGE = "https://github.com/openai/grade-school-math"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_URLS = {
"train": "https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/train.jsonl",
"test": "https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/test.jsonl",
}
class GSM8K(datasets.GeneratorBasedBuilder):
    """Grade School Math 8k: question/answer pairs of grade-school math word problems."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        # Reformatted to match the multi-line BuilderConfig style used by the
        # sibling dataset scripts (coqa, drop, headqa) in this repository.
        datasets.BuilderConfig(
            name="gsm8k",
            version=VERSION,
            description="The Grade School Math 8k dataset.",
        ),
    ]

    def _info(self):
        """Return DatasetInfo: each example is a plain question/answer string pair."""
        features = datasets.Features(
            {
                "question": datasets.Value("string"),
                "answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the train/test JSONL files and declare the two splits."""
        urls = {"train": _URLS["train"], "test": _URLS["test"]}
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        """Yield (line_index, {"question", "answer"}) from a JSON-lines file.

        `split` is unused here; both splits share the same record layout.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "question": data["question"],
                    "answer": data["answer"],
                }
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
...@@ -65,8 +64,12 @@ class HeadQA(datasets.GeneratorBasedBuilder): ...@@ -65,8 +64,12 @@ class HeadQA(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0") VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [ BUILDER_CONFIGS = [
datasets.BuilderConfig(name="es", version=VERSION, description="Spanish HEAD dataset"), datasets.BuilderConfig(
datasets.BuilderConfig(name="en", version=VERSION, description="English HEAD dataset"), name="es", version=VERSION, description="Spanish HEAD dataset"
),
datasets.BuilderConfig(
name="en", version=VERSION, description="English HEAD dataset"
),
] ]
DEFAULT_CONFIG_NAME = "es" DEFAULT_CONFIG_NAME = "es"
...@@ -106,15 +109,24 @@ class HeadQA(datasets.GeneratorBasedBuilder): ...@@ -106,15 +109,24 @@ class HeadQA(datasets.GeneratorBasedBuilder):
return [ return [
datasets.SplitGenerator( datasets.SplitGenerator(
name=datasets.Split.TRAIN, name=datasets.Split.TRAIN,
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_lang_dir, f"train_{dir}.json")}, gen_kwargs={
"data_dir": data_dir,
"filepath": os.path.join(data_lang_dir, f"train_{dir}.json"),
},
), ),
datasets.SplitGenerator( datasets.SplitGenerator(
name=datasets.Split.TEST, name=datasets.Split.TEST,
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_lang_dir, f"test_{dir}.json")}, gen_kwargs={
"data_dir": data_dir,
"filepath": os.path.join(data_lang_dir, f"test_{dir}.json"),
},
), ),
datasets.SplitGenerator( datasets.SplitGenerator(
name=datasets.Split.VALIDATION, name=datasets.Split.VALIDATION,
gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_lang_dir, f"dev_{dir}.json")}, gen_kwargs={
"data_dir": data_dir,
"filepath": os.path.join(data_lang_dir, f"dev_{dir}.json"),
},
), ),
] ]
...@@ -134,7 +146,9 @@ class HeadQA(datasets.GeneratorBasedBuilder): ...@@ -134,7 +146,9 @@ class HeadQA(datasets.GeneratorBasedBuilder):
aids = [answer["aid"] for answer in question["answers"]] aids = [answer["aid"] for answer in question["answers"]]
atexts = [answer["atext"].strip() for answer in question["answers"]] atexts = [answer["atext"].strip() for answer in question["answers"]]
answers = [{"aid": aid, "atext": atext} for aid, atext in zip(aids, atexts)] answers = [
{"aid": aid, "atext": atext} for aid, atext in zip(aids, atexts)
]
id_ = f"{exam_id}_{qid}" id_ = f"{exam_id}_{qid}"
yield id_, { yield id_, {
......
{"original": {"description": "LAMBADA is a dataset to evaluate the capabilities of computational models for text\nunderstanding by means of a word prediction task. LAMBADA is a collection of narrative\ntexts sharing the characteristic that human subjects are able to guess their last\nword if they are exposed to the whole text, but not if they only see the last\nsentence preceding the target word. To succeed on LAMBADA, computational models\ncannot simply rely on local context, but must be able to keep track of information\nin the broader discourse.\n\nThe LAMBADA dataset", "citation": "@misc{\n author={Paperno, Denis and Kruszewski, Germ\u00e1n and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fern\u00e1ndez, Raquel}, \n title={The LAMBADA dataset},\n DOI={10.5281/zenodo.2630551},\n publisher={Zenodo},\n year={2016},\n month={Aug}\n}\n", "homepage": "https://zenodo.org/record/2630551#.X4Xzn5NKjUI", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lambada", "config_name": "original", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"validation": {"name": "validation", "num_bytes": 1709449, "num_examples": 5153, "dataset_name": "lambada"}}, "download_checksums": {"http://eaidata.bmk.sh/data/lambada_test.jsonl": {"num_bytes": 1819752, "checksum": "4aa8d02cd17c719165fc8a7887fddd641f43fcafa4b1c806ca8abc31fabdb226"}}, "download_size": 1819752, "post_processing_size": null, "dataset_size": 1709449, "size_in_bytes": 3529201}, "en": {"description": "LAMBADA is a dataset to evaluate the capabilities of computational models for text\nunderstanding by means of a word prediction task. 
LAMBADA is a collection of narrative\ntexts sharing the characteristic that human subjects are able to guess their last\nword if they are exposed to the whole text, but not if they only see the last\nsentence preceding the target word. To succeed on LAMBADA, computational models\ncannot simply rely on local context, but must be able to keep track of information\nin the broader discourse.\n\nThe English translated LAMBADA dataset", "citation": "@misc{\n author={Paperno, Denis and Kruszewski, Germ\u00e1n and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fern\u00e1ndez, Raquel}, \n title={The LAMBADA dataset},\n DOI={10.5281/zenodo.2630551},\n publisher={Zenodo},\n year={2016},\n month={Aug}\n}\n", "homepage": "https://zenodo.org/record/2630551#.X4Xzn5NKjUI", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lambada", "config_name": "en", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"validation": {"name": "validation", "num_bytes": 1709449, "num_examples": 5153, "dataset_name": "lambada"}}, "download_checksums": {"http://eaidata.bmk.sh/data/lambada_test_en.jsonl": {"num_bytes": 1819752, "checksum": "4aa8d02cd17c719165fc8a7887fddd641f43fcafa4b1c806ca8abc31fabdb226"}}, "download_size": 1819752, "post_processing_size": null, "dataset_size": 1709449, "size_in_bytes": 3529201}, "fr": {"description": "LAMBADA is a dataset to evaluate the capabilities of computational models for text\nunderstanding by means of a word prediction task. LAMBADA is a collection of narrative\ntexts sharing the characteristic that human subjects are able to guess their last\nword if they are exposed to the whole text, but not if they only see the last\nsentence preceding the target word. 
To succeed on LAMBADA, computational models\ncannot simply rely on local context, but must be able to keep track of information\nin the broader discourse.\n\nThe French translated LAMBADA dataset", "citation": "@misc{\n author={Paperno, Denis and Kruszewski, Germ\u00e1n and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fern\u00e1ndez, Raquel}, \n title={The LAMBADA dataset},\n DOI={10.5281/zenodo.2630551},\n publisher={Zenodo},\n year={2016},\n month={Aug}\n}\n", "homepage": "https://zenodo.org/record/2630551#.X4Xzn5NKjUI", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lambada", "config_name": "fr", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"validation": {"name": "validation", "num_bytes": 1948795, "num_examples": 5153, "dataset_name": "lambada"}}, "download_checksums": {"http://eaidata.bmk.sh/data/lambada_test_fr.jsonl": {"num_bytes": 2028703, "checksum": "941ec6a73dba7dc91c860bf493eb66a527cd430148827a4753a4535a046bf362"}}, "download_size": 2028703, "post_processing_size": null, "dataset_size": 1948795, "size_in_bytes": 3977498}, "de": {"description": "LAMBADA is a dataset to evaluate the capabilities of computational models for text\nunderstanding by means of a word prediction task. LAMBADA is a collection of narrative\ntexts sharing the characteristic that human subjects are able to guess their last\nword if they are exposed to the whole text, but not if they only see the last\nsentence preceding the target word. 
To succeed on LAMBADA, computational models\ncannot simply rely on local context, but must be able to keep track of information\nin the broader discourse.\n\nThe German translated LAMBADA dataset", "citation": "@misc{\n author={Paperno, Denis and Kruszewski, Germ\u00e1n and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fern\u00e1ndez, Raquel}, \n title={The LAMBADA dataset},\n DOI={10.5281/zenodo.2630551},\n publisher={Zenodo},\n year={2016},\n month={Aug}\n}\n", "homepage": "https://zenodo.org/record/2630551#.X4Xzn5NKjUI", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lambada", "config_name": "de", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"validation": {"name": "validation", "num_bytes": 1904576, "num_examples": 5153, "dataset_name": "lambada"}}, "download_checksums": {"http://eaidata.bmk.sh/data/lambada_test_de.jsonl": {"num_bytes": 1985231, "checksum": "51c6c1795894c46e88e4c104b5667f488efe79081fb34d746b82b8caa663865e"}}, "download_size": 1985231, "post_processing_size": null, "dataset_size": 1904576, "size_in_bytes": 3889807}, "it": {"description": "LAMBADA is a dataset to evaluate the capabilities of computational models for text\nunderstanding by means of a word prediction task. LAMBADA is a collection of narrative\ntexts sharing the characteristic that human subjects are able to guess their last\nword if they are exposed to the whole text, but not if they only see the last\nsentence preceding the target word. 
To succeed on LAMBADA, computational models\ncannot simply rely on local context, but must be able to keep track of information\nin the broader discourse.\n\nThe Italian translated LAMBADA dataset", "citation": "@misc{\n author={Paperno, Denis and Kruszewski, Germ\u00e1n and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fern\u00e1ndez, Raquel}, \n title={The LAMBADA dataset},\n DOI={10.5281/zenodo.2630551},\n publisher={Zenodo},\n year={2016},\n month={Aug}\n}\n", "homepage": "https://zenodo.org/record/2630551#.X4Xzn5NKjUI", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lambada", "config_name": "it", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"validation": {"name": "validation", "num_bytes": 1813420, "num_examples": 5153, "dataset_name": "lambada"}}, "download_checksums": {"http://eaidata.bmk.sh/data/lambada_test_it.jsonl": {"num_bytes": 1894613, "checksum": "86654237716702ab74f42855ae5a78455c1b0e50054a4593fb9c6fcf7fad0850"}}, "download_size": 1894613, "post_processing_size": null, "dataset_size": 1813420, "size_in_bytes": 3708033}, "es": {"description": "LAMBADA is a dataset to evaluate the capabilities of computational models for text\nunderstanding by means of a word prediction task. LAMBADA is a collection of narrative\ntexts sharing the characteristic that human subjects are able to guess their last\nword if they are exposed to the whole text, but not if they only see the last\nsentence preceding the target word. 
To succeed on LAMBADA, computational models\ncannot simply rely on local context, but must be able to keep track of information\nin the broader discourse.\n\nThe Spanish translated LAMBADA dataset", "citation": "@misc{\n author={Paperno, Denis and Kruszewski, Germ\u00e1n and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fern\u00e1ndez, Raquel}, \n title={The LAMBADA dataset},\n DOI={10.5281/zenodo.2630551},\n publisher={Zenodo},\n year={2016},\n month={Aug}\n}\n", "homepage": "https://zenodo.org/record/2630551#.X4Xzn5NKjUI", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lambada", "config_name": "es", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"validation": {"name": "validation", "num_bytes": 1821735, "num_examples": 5153, "dataset_name": "lambada"}}, "download_checksums": {"http://eaidata.bmk.sh/data/lambada_test_es.jsonl": {"num_bytes": 1902349, "checksum": "ffd760026c647fb43c67ce1bc56fd527937304b348712dce33190ea6caba6f9c"}}, "download_size": 1902349, "post_processing_size": null, "dataset_size": 1821735, "size_in_bytes": 3724084}}
\ No newline at end of file
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment