Commit 6432933d authored by lintangsutawika's avatar lintangsutawika
Browse files

updated notebook

parent 0801d1ff
...@@ -39,7 +39,7 @@ repos: ...@@ -39,7 +39,7 @@ repos:
- id: codespell - id: codespell
exclude: > exclude: >
(?x)^( (?x)^(
.*\.json|ignore.txt|lm_eval/tasks/.*|.*yaml .*\.json|ignore.txt|lm_eval/tasks/.*|.*yaml|.*\.ipynb
)$ )$
args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt] args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt]
- repo: https://github.com/pre-commit/mirrors-mypy - repo: https://github.com/pre-commit/mirrors-mypy
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
"id": "Qw83KAePAhaS" "id": "Qw83KAePAhaS"
}, },
"source": [ "source": [
"# Releasing LM-Evaluation-Harness v0.5.0" "# Releasing LM-Evaluation-Harness v0.4.0"
] ]
}, },
{ {
...@@ -58,7 +58,7 @@ ...@@ -58,7 +58,7 @@
"base_uri": "https://localhost:8080/" "base_uri": "https://localhost:8080/"
}, },
"id": "8hiosGzq_qZg", "id": "8hiosGzq_qZg",
"outputId": "c6fe3ead-d9ef-489a-8b13-0424e1b26cb4" "outputId": "6ab73e5e-1f54-417e-a388-07e0d870b132"
}, },
"outputs": [ "outputs": [
{ {
...@@ -66,42 +66,42 @@ ...@@ -66,42 +66,42 @@
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"Collecting git+https://github.com/EleutherAI/lm-evaluation-harness.git@big-refactor\n", "Collecting git+https://github.com/EleutherAI/lm-evaluation-harness.git@big-refactor\n",
" Cloning https://github.com/EleutherAI/lm-evaluation-harness.git (to revision big-refactor) to /tmp/pip-req-build-l2xli32o\n", " Cloning https://github.com/EleutherAI/lm-evaluation-harness.git (to revision big-refactor) to /tmp/pip-req-build-tnssql5s\n",
" Running command git clone --filter=blob:none --quiet https://github.com/EleutherAI/lm-evaluation-harness.git /tmp/pip-req-build-l2xli32o\n", " Running command git clone --filter=blob:none --quiet https://github.com/EleutherAI/lm-evaluation-harness.git /tmp/pip-req-build-tnssql5s\n",
" Running command git checkout -b big-refactor --track origin/big-refactor\n", " Running command git checkout -b big-refactor --track origin/big-refactor\n",
" Switched to a new branch 'big-refactor'\n", " Switched to a new branch 'big-refactor'\n",
" Branch 'big-refactor' set up to track remote branch 'big-refactor' from 'origin'.\n", " Branch 'big-refactor' set up to track remote branch 'big-refactor' from 'origin'.\n",
" Resolved https://github.com/EleutherAI/lm-evaluation-harness.git to commit 30936bc7c6e03c41e1f0d45b68a02b0f39e399f2\n", " Resolved https://github.com/EleutherAI/lm-evaluation-harness.git to commit 42f486ee49b65926a444cb0620870a39a5b4b0a8\n",
" Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
" Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
" Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
"Collecting accelerate>=0.21.0 (from lm-eval==1.0.0)\n", "Collecting accelerate>=0.21.0 (from lm-eval==1.0.0)\n",
" Downloading accelerate-0.24.1-py3-none-any.whl (261 kB)\n", " Downloading accelerate-0.24.1-py3-none-any.whl (261 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m261.4/261.4 kB\u001b[0m \u001b[31m3.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m261.4/261.4 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting evaluate (from lm-eval==1.0.0)\n", "\u001b[?25hCollecting evaluate (from lm-eval==1.0.0)\n",
" Downloading evaluate-0.4.1-py3-none-any.whl (84 kB)\n", " Downloading evaluate-0.4.1-py3-none-any.whl (84 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m84.1/84.1 kB\u001b[0m \u001b[31m9.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m84.1/84.1 kB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting datasets>=2.0.0 (from lm-eval==1.0.0)\n", "\u001b[?25hCollecting datasets>=2.0.0 (from lm-eval==1.0.0)\n",
" Downloading datasets-2.15.0-py3-none-any.whl (521 kB)\n", " Downloading datasets-2.15.0-py3-none-any.whl (521 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m521.2/521.2 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m521.2/521.2 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting jsonlines (from lm-eval==1.0.0)\n", "\u001b[?25hCollecting jsonlines (from lm-eval==1.0.0)\n",
" Downloading jsonlines-4.0.0-py3-none-any.whl (8.7 kB)\n", " Downloading jsonlines-4.0.0-py3-none-any.whl (8.7 kB)\n",
"Requirement already satisfied: numexpr in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (2.8.7)\n", "Requirement already satisfied: numexpr in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (2.8.7)\n",
"Collecting peft>=0.2.0 (from lm-eval==1.0.0)\n", "Collecting peft>=0.2.0 (from lm-eval==1.0.0)\n",
" Downloading peft-0.6.2-py3-none-any.whl (174 kB)\n", " Downloading peft-0.6.2-py3-none-any.whl (174 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m174.7/174.7 kB\u001b[0m \u001b[31m22.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m174.7/174.7 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting pybind11>=2.6.2 (from lm-eval==1.0.0)\n", "\u001b[?25hCollecting pybind11>=2.6.2 (from lm-eval==1.0.0)\n",
" Downloading pybind11-2.11.1-py3-none-any.whl (227 kB)\n", " Downloading pybind11-2.11.1-py3-none-any.whl (227 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.7/227.7 kB\u001b[0m \u001b[31m20.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.7/227.7 kB\u001b[0m \u001b[31m12.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting pytablewriter (from lm-eval==1.0.0)\n", "\u001b[?25hCollecting pytablewriter (from lm-eval==1.0.0)\n",
" Downloading pytablewriter-1.2.0-py3-none-any.whl (111 kB)\n", " Downloading pytablewriter-1.2.0-py3-none-any.whl (111 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m111.1/111.1 kB\u001b[0m \u001b[31m12.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m111.1/111.1 kB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting rouge-score>=0.0.4 (from lm-eval==1.0.0)\n", "\u001b[?25hCollecting rouge-score>=0.0.4 (from lm-eval==1.0.0)\n",
" Downloading rouge_score-0.1.2.tar.gz (17 kB)\n", " Downloading rouge_score-0.1.2.tar.gz (17 kB)\n",
" Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"Collecting sacrebleu>=1.5.0 (from lm-eval==1.0.0)\n", "Collecting sacrebleu>=1.5.0 (from lm-eval==1.0.0)\n",
" Downloading sacrebleu-2.3.2-py3-none-any.whl (119 kB)\n", " Downloading sacrebleu-2.3.2-py3-none-any.whl (119 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m119.7/119.7 kB\u001b[0m \u001b[31m12.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m119.7/119.7 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: scikit-learn>=0.24.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (1.2.2)\n", "\u001b[?25hRequirement already satisfied: scikit-learn>=0.24.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (1.2.2)\n",
"Collecting sqlitedict (from lm-eval==1.0.0)\n", "Collecting sqlitedict (from lm-eval==1.0.0)\n",
" Downloading sqlitedict-2.1.0.tar.gz (21 kB)\n", " Downloading sqlitedict-2.1.0.tar.gz (21 kB)\n",
...@@ -112,7 +112,7 @@ ...@@ -112,7 +112,7 @@
"Requirement already satisfied: transformers>=4.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (4.35.2)\n", "Requirement already satisfied: transformers>=4.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (4.35.2)\n",
"Collecting zstandard (from lm-eval==1.0.0)\n", "Collecting zstandard (from lm-eval==1.0.0)\n",
" Downloading zstandard-0.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.4 MB)\n", " Downloading zstandard-0.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.4 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m30.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m29.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (1.23.5)\n", "\u001b[?25hRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (1.23.5)\n",
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (23.2)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (23.2)\n",
"Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (5.9.5)\n", "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (5.9.5)\n",
...@@ -123,14 +123,14 @@ ...@@ -123,14 +123,14 @@
" Downloading pyarrow_hotfix-0.6-py3-none-any.whl (7.9 kB)\n", " Downloading pyarrow_hotfix-0.6-py3-none-any.whl (7.9 kB)\n",
"Collecting dill<0.3.8,>=0.3.0 (from datasets>=2.0.0->lm-eval==1.0.0)\n", "Collecting dill<0.3.8,>=0.3.0 (from datasets>=2.0.0->lm-eval==1.0.0)\n",
" Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n", " Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m16.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m14.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (1.5.3)\n", "\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (1.5.3)\n",
"Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2.31.0)\n", "Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2.31.0)\n",
"Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (4.66.1)\n", "Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (4.66.1)\n",
"Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.4.1)\n", "Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.4.1)\n",
"Collecting multiprocess (from datasets>=2.0.0->lm-eval==1.0.0)\n", "Collecting multiprocess (from datasets>=2.0.0->lm-eval==1.0.0)\n",
" Downloading multiprocess-0.70.15-py310-none-any.whl (134 kB)\n", " Downloading multiprocess-0.70.15-py310-none-any.whl (134 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m17.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m19.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: fsspec[http]<=2023.10.0,>=2023.1.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2023.6.0)\n", "\u001b[?25hRequirement already satisfied: fsspec[http]<=2023.10.0,>=2023.1.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2023.6.0)\n",
"Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.8.6)\n", "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.8.6)\n",
"Collecting responses<0.19 (from evaluate->lm-eval==1.0.0)\n", "Collecting responses<0.19 (from evaluate->lm-eval==1.0.0)\n",
...@@ -187,13 +187,13 @@ ...@@ -187,13 +187,13 @@
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.8->lm-eval==1.0.0) (1.3.0)\n", "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.8->lm-eval==1.0.0) (1.3.0)\n",
"Building wheels for collected packages: lm-eval, rouge-score, sqlitedict\n", "Building wheels for collected packages: lm-eval, rouge-score, sqlitedict\n",
" Building wheel for lm-eval (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", " Building wheel for lm-eval (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for lm-eval: filename=lm_eval-1.0.0-py3-none-any.whl size=986403 sha256=b7907b5840136fbd6ae948150ebab82776a0deb76359e59da24fbd2259de8094\n", " Created wheel for lm-eval: filename=lm_eval-1.0.0-py3-none-any.whl size=994254 sha256=88356155b19f2891981ecef948326ad6ce8ca40a6009378410ec20d0e225995a\n",
" Stored in directory: /tmp/pip-ephem-wheel-cache-axf286nt/wheels/17/01/26/599c0779e9858a70a73fa8a306699b5b9a868f820c225457b0\n", " Stored in directory: /tmp/pip-ephem-wheel-cache-9v6ye7h3/wheels/17/01/26/599c0779e9858a70a73fa8a306699b5b9a868f820c225457b0\n",
" Building wheel for rouge-score (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Building wheel for rouge-score (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for rouge-score: filename=rouge_score-0.1.2-py3-none-any.whl size=24933 sha256=8983ef5e600070817b0114ebe7ad48d72891d918ab2beea00ed6c5c437a6f713\n", " Created wheel for rouge-score: filename=rouge_score-0.1.2-py3-none-any.whl size=24933 sha256=6bb0d44e4881972c43ce194e7cb65233d309758cb15f0dec54590d3d2efcfc36\n",
" Stored in directory: /root/.cache/pip/wheels/5f/dd/89/461065a73be61a532ff8599a28e9beef17985c9e9c31e541b4\n", " Stored in directory: /root/.cache/pip/wheels/5f/dd/89/461065a73be61a532ff8599a28e9beef17985c9e9c31e541b4\n",
" Building wheel for sqlitedict (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Building wheel for sqlitedict (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for sqlitedict: filename=sqlitedict-2.1.0-py3-none-any.whl size=16863 sha256=146f7dcb0d03ca6c21a08e9d93d74d6c07cd42d0e37ddb7e922127e5ddb5e213\n", " Created wheel for sqlitedict: filename=sqlitedict-2.1.0-py3-none-any.whl size=16863 sha256=5747f7dd73ddf3d8fbcebf51b5e4f718fabe1e94bccdf16d2f22a2e65ee7fdf4\n",
" Stored in directory: /root/.cache/pip/wheels/79/d6/e7/304e0e6cb2221022c26d8161f7c23cd4f259a9e41e8bbcfabd\n", " Stored in directory: /root/.cache/pip/wheels/79/d6/e7/304e0e6cb2221022c26d8161f7c23cd4f259a9e41e8bbcfabd\n",
"Successfully built lm-eval rouge-score sqlitedict\n", "Successfully built lm-eval rouge-score sqlitedict\n",
"Installing collected packages: sqlitedict, zstandard, tcolorpy, pybind11, pyarrow-hotfix, portalocker, pathvalidate, mbstrdecoder, jsonlines, dill, colorama, typepy, tqdm-multiprocess, sacrebleu, rouge-score, responses, multiprocess, accelerate, datasets, DataProperty, tabledata, peft, evaluate, pytablewriter, lm-eval\n", "Installing collected packages: sqlitedict, zstandard, tcolorpy, pybind11, pyarrow-hotfix, portalocker, pathvalidate, mbstrdecoder, jsonlines, dill, colorama, typepy, tqdm-multiprocess, sacrebleu, rouge-score, responses, multiprocess, accelerate, datasets, DataProperty, tabledata, peft, evaluate, pytablewriter, lm-eval\n",
...@@ -212,29 +212,29 @@ ...@@ -212,29 +212,29 @@
"metadata": { "metadata": {
"colab": { "colab": {
"base_uri": "https://localhost:8080/", "base_uri": "https://localhost:8080/",
"height": 49, "height": 0,
"referenced_widgets": [ "referenced_widgets": [
"ff5a90d6c8884a4dbf5b70453e45bcad", "a1d3a8aa016544a78e8821c8f6199e06",
"214bcd0301bd423b8ed691cace119137", "f61ed33fad754146bdd2ac9db1ba1c48",
"460777325dd64ecaa929021ffc9d5f3a", "bfa0af6aeff344c6845e1080a878e92e",
"4a50bd7b62d94f40a13a2cb7fbc0ba4b", "fd1ad9e0367d4004aae853b91c3a7617",
"11451d297d3d4037b9fd6010d2097843", "6b2d90209ec14230b3d58a74ac9b83bf",
"164724b8d45a4adfbd44805cc16ca032", "a73f357065d34d7baf0453ae4a8d75e2",
"18171a183d1e4dd5a4f6670e517dcc1d", "46f521b73fd943c081c648fd873ebc0a",
"8c4e29d728264a5caf38dc3781a017a8", "7c5689bc13684db8a22681f41863dddd",
"52c8d0cf3e7f48959883934856331286", "48763b6233374554ae76035c0483066f",
"8f62151425e345128a110b6aff76af71", "4986a21eb560448fa79f4b25cde48951",
"412f24cc52494ca28151e4857361b88c" "aed3acd2f2d74003b44079c333a0698e"
] ]
}, },
"id": "uyO5MaKkZyah", "id": "uyO5MaKkZyah",
"outputId": "d487150b-65f3-4a4f-df22-b593594b8ecf" "outputId": "d46e8096-5086-4e49-967e-ea33d4a2a335"
}, },
"outputs": [ "outputs": [
{ {
"data": { "data": {
"application/vnd.jupyter.widget-view+json": { "application/vnd.jupyter.widget-view+json": {
"model_id": "ff5a90d6c8884a4dbf5b70453e45bcad", "model_id": "a1d3a8aa016544a78e8821c8f6199e06",
"version_major": 2, "version_major": 2,
"version_minor": 0 "version_minor": 0
}, },
...@@ -280,7 +280,6 @@ ...@@ -280,7 +280,6 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"YAML_boolq_string = '''\n", "YAML_boolq_string = '''\n",
"group: yes_or_no_tasks\n",
"task: demo_boolq\n", "task: demo_boolq\n",
"dataset_path: super_glue\n", "dataset_path: super_glue\n",
"dataset_name: boolq\n", "dataset_name: boolq\n",
...@@ -299,34 +298,58 @@ ...@@ -299,34 +298,58 @@
" f.write(YAML_boolq_string)" " f.write(YAML_boolq_string)"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 4, "execution_count": 4,
"metadata": { "metadata": {
"colab": { "id": "LOUHK7PtQfq4"
"base_uri": "https://localhost:8080/"
},
"id": "V-_3oDfXPIZ6",
"outputId": "194dcfa5-f3f1-4d3e-fb35-a34d8039a423"
}, },
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"2023-11-27:08:14:53,517 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", "2023-11-29:11:54:55,156 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
"2023-11-27 08:14:54.499605: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "2023-11-29 11:54:55.942051: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2023-11-27 08:14:54.499658: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "2023-11-29 11:54:55.942108: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2023-11-27 08:14:54.499691: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "2023-11-29 11:54:55.942142: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2023-11-27 08:14:56.139266: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "2023-11-29 11:54:57.066802: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"usage: lm_eval [-h] [--model MODEL] [--tasks TASKS] [--model_args MODEL_ARGS]\n", "2023-11-29:11:55:00,954 INFO [__main__.py:132] Verbosity set to INFO\n",
" [--num_fewshot NUM_FEWSHOT] [--batch_size BATCH_SIZE]\n", "2023-11-29:11:55:11,038 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
" [--max_batch_size MAX_BATCH_SIZE] [--device DEVICE]\n", "2023-11-29:11:55:11,038 INFO [__main__.py:143] Including path: ./\n",
" [--output_path = [dir/file.jsonl] [DIR]] [--limit LIMIT] [--use_cache USE_CACHE]\n", "2023-11-29:11:55:11,046 INFO [__main__.py:205] Selected Tasks: ['demo_boolq']\n",
" [--decontamination_ngrams_path DECONTAMINATION_NGRAMS_PATH] [--check_integrity]\n", "2023-11-29:11:55:11,047 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
" [--write_out] [--log_samples] [--show_config] [--include_path INCLUDE_PATH]\n", "2023-11-29:11:55:11,110 INFO [huggingface.py:120] Using device 'cuda'\n",
" [--verbosity VERBOSITY]\n", "config.json: 100% 571/571 [00:00<00:00, 2.87MB/s]\n",
"lm_eval: error: unrecognized arguments: \\\n" "model.safetensors: 100% 5.68G/5.68G [00:32<00:00, 173MB/s]\n",
"tokenizer_config.json: 100% 396/396 [00:00<00:00, 2.06MB/s]\n",
"tokenizer.json: 100% 2.11M/2.11M [00:00<00:00, 11.6MB/s]\n",
"special_tokens_map.json: 100% 99.0/99.0 [00:00<00:00, 555kB/s]\n",
"2023-11-29:11:56:18,658 WARNING [task.py:614] [Task: demo_boolq] metric acc is defined, but aggregation is not. using default aggregation=mean\n",
"2023-11-29:11:56:18,658 WARNING [task.py:626] [Task: demo_boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True\n",
"Downloading builder script: 100% 30.7k/30.7k [00:00<00:00, 59.0MB/s]\n",
"Downloading metadata: 100% 38.7k/38.7k [00:00<00:00, 651kB/s]\n",
"Downloading readme: 100% 14.8k/14.8k [00:00<00:00, 37.3MB/s]\n",
"Downloading data: 100% 4.12M/4.12M [00:00<00:00, 55.1MB/s]\n",
"Generating train split: 100% 9427/9427 [00:00<00:00, 15630.89 examples/s]\n",
"Generating validation split: 100% 3270/3270 [00:00<00:00, 20002.56 examples/s]\n",
"Generating test split: 100% 3245/3245 [00:00<00:00, 20866.19 examples/s]\n",
"2023-11-29:11:56:22,315 INFO [task.py:355] Building contexts for task on rank 0...\n",
"2023-11-29:11:56:22,322 INFO [evaluator.py:319] Running loglikelihood requests\n",
"100% 20/20 [00:04<00:00, 4.37it/s]\n",
"fatal: not a git repository (or any of the parent directories): .git\n",
"hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
"| Tasks |Version|Filter|n-shot|Metric|Value| |Stderr|\n",
"|----------|-------|------|-----:|------|----:|---|-----:|\n",
"|demo_boolq|Yaml |none | 0|acc | 1|± | 0|\n",
"\n"
] ]
} }
], ],
...@@ -345,7 +368,7 @@ ...@@ -345,7 +368,7 @@
"id": "LOUHK7PtQfq4" "id": "LOUHK7PtQfq4"
}, },
"source": [ "source": [
"Oftenly, tasks are part of a larger group used to meausre different capabilities. The dynamism of the field today means new dimensions of evaluation can come about which would mix and match new and older tasks alike. In LM-Eval, We can also group tasks and call that the group name to evaluate on a set of tasks easily. In this instance, let's evaluate the group `yes_or_no_tasks` which comprise of the tasks `demo_boolq` and `demo_cola`; tasks which are multiple choice tasks with options `yes` and `no` as the name suggests.\n", "Often, tasks are part of a larger group used to meausre different capabilities. The dynamism of the field today means new dimensions of evaluation can come about which would mix and match new and older tasks alike. In LM-Eval, We can also group tasks and call that the group name to evaluate on a set of tasks easily. In this instance, let's evaluate the group `yes_or_no_tasks` which comprise of the tasks `demo_boolq` and `demo_cola`; tasks which are multiple choice tasks with options `yes` and `no` as the name suggests.\n",
"\n", "\n",
"<!-- making new groups is easier than ever, allowing user to work bottom-up by makiing individual tasks and linking them to a group or Top-Down, making a new group by listing existing tasks.\n", "<!-- making new groups is easier than ever, allowing user to work bottom-up by makiing individual tasks and linking them to a group or Top-Down, making a new group by listing existing tasks.\n",
"\n", "\n",
...@@ -386,65 +409,46 @@ ...@@ -386,65 +409,46 @@
"cell_type": "code", "cell_type": "code",
"execution_count": 6, "execution_count": 6,
"metadata": { "metadata": {
"colab": { "id": "XceRKCuuDtbn"
"base_uri": "https://localhost:8080/"
},
"id": "t2p91qUi-WBR",
"outputId": "cc22b65b-116a-4371-f709-d5cb865f99a9"
}, },
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"2023-11-27:08:15:04,956 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", "2023-11-29:11:56:33,016 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
"2023-11-27 08:15:06.059715: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "2023-11-29 11:56:33.852995: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2023-11-27 08:15:06.059774: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "2023-11-29 11:56:33.853050: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2023-11-27 08:15:06.059810: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "2023-11-29 11:56:33.853087: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2023-11-27 08:15:07.648550: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "2023-11-29 11:56:35.129047: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"2023-11-27:08:15:10,952 INFO [__main__.py:124] Verbosity set to INFO\n", "2023-11-29:11:56:38,546 INFO [__main__.py:132] Verbosity set to INFO\n",
"2023-11-27:08:15:18,895 WARNING [__main__.py:130] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", "2023-11-29:11:56:47,509 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
"2023-11-27:08:15:18,895 INFO [__main__.py:135] Including path: ./\n", "2023-11-29:11:56:47,509 INFO [__main__.py:143] Including path: ./\n",
"2023-11-27:08:15:18,915 INFO [__main__.py:197] Selected Tasks: ['yes_or_no_tasks']\n", "2023-11-29:11:56:47,517 INFO [__main__.py:205] Selected Tasks: ['yes_or_no_tasks']\n",
"2023-11-27:08:15:18,979 INFO [huggingface.py:119] Using device 'cuda'\n", "2023-11-29:11:56:47,520 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
"config.json: 100% 571/571 [00:00<00:00, 2.57MB/s]\n", "2023-11-29:11:56:47,550 INFO [huggingface.py:120] Using device 'cuda'\n",
"model.safetensors: 100% 5.68G/5.68G [00:40<00:00, 142MB/s]\n", "2023-11-29:11:57:08,743 WARNING [task.py:614] [Task: demo_cola] metric acc is defined, but aggregation is not. using default aggregation=mean\n",
"tokenizer_config.json: 100% 396/396 [00:00<00:00, 1.97MB/s]\n", "2023-11-29:11:57:08,743 WARNING [task.py:626] [Task: demo_cola] metric acc is defined, but higher_is_better is not. using default higher_is_better=True\n",
"tokenizer.json: 100% 2.11M/2.11M [00:00<00:00, 8.57MB/s]\n", "Downloading builder script: 100% 28.8k/28.8k [00:00<00:00, 52.7MB/s]\n",
"special_tokens_map.json: 100% 99.0/99.0 [00:00<00:00, 541kB/s]\n", "Downloading metadata: 100% 28.7k/28.7k [00:00<00:00, 51.9MB/s]\n",
"2023-11-27:08:16:31,344 WARNING [task.py:612] [Task: demo_boolq] metric acc is defined, but aggregation is not. using default aggregation=mean\n", "Downloading readme: 100% 27.9k/27.9k [00:00<00:00, 48.0MB/s]\n",
"2023-11-27:08:16:31,344 WARNING [task.py:624] [Task: demo_boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True\n", "Downloading data: 100% 377k/377k [00:00<00:00, 12.0MB/s]\n",
"Downloading builder script: 100% 30.7k/30.7k [00:00<00:00, 36.5MB/s]\n", "Generating train split: 100% 8551/8551 [00:00<00:00, 19744.58 examples/s]\n",
"Downloading metadata: 100% 38.7k/38.7k [00:00<00:00, 660kB/s]\n", "Generating validation split: 100% 1043/1043 [00:00<00:00, 27057.01 examples/s]\n",
"Downloading readme: 100% 14.8k/14.8k [00:00<00:00, 42.8MB/s]\n", "Generating test split: 100% 1063/1063 [00:00<00:00, 22705.17 examples/s]\n",
"Downloading data: 100% 4.12M/4.12M [00:00<00:00, 54.6MB/s]\n", "2023-11-29:11:57:11,698 INFO [task.py:355] Building contexts for task on rank 0...\n",
"Generating train split: 100% 9427/9427 [00:00<00:00, 15481.88 examples/s]\n", "2023-11-29:11:57:11,704 INFO [evaluator.py:319] Running loglikelihood requests\n",
"Generating validation split: 100% 3270/3270 [00:00<00:00, 19302.83 examples/s]\n", "100% 20/20 [00:03<00:00, 5.15it/s]\n",
"Generating test split: 100% 3245/3245 [00:00<00:00, 20505.42 examples/s]\n",
"2023-11-27:08:16:35,225 WARNING [task.py:612] [Task: demo_cola] metric acc is defined, but aggregation is not. using default aggregation=mean\n",
"2023-11-27:08:16:35,225 WARNING [task.py:624] [Task: demo_cola] metric acc is defined, but higher_is_better is not. using default higher_is_better=True\n",
"Downloading builder script: 100% 28.8k/28.8k [00:00<00:00, 36.8MB/s]\n",
"Downloading metadata: 100% 28.7k/28.7k [00:00<00:00, 57.2MB/s]\n",
"Downloading readme: 100% 27.9k/27.9k [00:00<00:00, 52.1MB/s]\n",
"Downloading data: 100% 377k/377k [00:00<00:00, 11.9MB/s]\n",
"Generating train split: 100% 8551/8551 [00:00<00:00, 28820.03 examples/s]\n",
"Generating validation split: 100% 1043/1043 [00:00<00:00, 27790.96 examples/s]\n",
"Generating test split: 100% 1063/1063 [00:00<00:00, 26533.12 examples/s]\n",
"2023-11-27:08:16:38,366 INFO [task.py:353] Building contexts for task on rank 0...\n",
"2023-11-27:08:16:38,373 INFO [task.py:353] Building contexts for task on rank 0...\n",
"2023-11-27:08:16:38,379 INFO [evaluator.py:290] Running loglikelihood requests\n",
"100% 40/40 [00:05<00:00, 7.18it/s]\n",
"fatal: not a git repository (or any of the parent directories): .git\n", "fatal: not a git repository (or any of the parent directories): .git\n",
"hf (pretrained=EleutherAI/pythia-2.8b), limit: 10.0, num_fewshot: None, batch_size: 1\n", "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
"| Tasks |Version|Filter|Metric|Value| |Stderr|\n", "| Tasks |Version|Filter|n-shot|Metric|Value| |Stderr|\n",
"|---------------|-------|------|------|----:|---|-----:|\n", "|---------------|-------|------|-----:|------|----:|---|-----:|\n",
"|yes_or_no_tasks|N/A |none |acc | 0.85|± |0.1303|\n", "|yes_or_no_tasks|N/A |none | 0|acc | 0.7|± |0.1528|\n",
"| - demo_boolq |Yaml |none |acc | 1.00|± |0.0000|\n", "| - demo_cola |Yaml |none | 0|acc | 0.7|± |0.1528|\n",
"| - demo_cola |Yaml |none |acc | 0.70|± |0.1528|\n",
"\n", "\n",
"| Groups |Version|Filter|Metric|Value| |Stderr|\n", "| Groups |Version|Filter|n-shot|Metric|Value| |Stderr|\n",
"|---------------|-------|------|------|----:|---|-----:|\n", "|---------------|-------|------|-----:|------|----:|---|-----:|\n",
"|yes_or_no_tasks|N/A |none |acc | 0.85|± |0.1303|\n", "|yes_or_no_tasks|N/A |none | 0|acc | 0.7|± |0.1528|\n",
"\n" "\n"
] ]
} }
...@@ -510,44 +514,41 @@ ...@@ -510,44 +514,41 @@
"cell_type": "code", "cell_type": "code",
"execution_count": 8, "execution_count": 8,
"metadata": { "metadata": {
"colab": { "id": "jyKOfCsKb-xy"
"base_uri": "https://localhost:8080/"
},
"id": "_bhGyryiTs-p",
"outputId": "4ac4e69d-8841-456d-c617-ad1487cd3a34"
}, },
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"2023-11-27:08:16:50,338 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", "2023-11-29:11:57:23,598 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
"2023-11-27 08:16:51.182014: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "2023-11-29 11:57:24.719750: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2023-11-27 08:16:51.182064: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "2023-11-29 11:57:24.719806: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2023-11-27 08:16:51.182100: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "2023-11-29 11:57:24.719847: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2023-11-27 08:16:52.470541: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "2023-11-29 11:57:26.656125: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"2023-11-27:08:16:55,648 INFO [__main__.py:124] Verbosity set to INFO\n", "2023-11-29:11:57:31,563 INFO [__main__.py:132] Verbosity set to INFO\n",
"2023-11-27:08:17:05,419 WARNING [__main__.py:130] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", "2023-11-29:11:57:40,541 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
"2023-11-27:08:17:05,420 INFO [__main__.py:135] Including path: ./\n", "2023-11-29:11:57:40,541 INFO [__main__.py:143] Including path: ./\n",
"2023-11-27:08:17:05,433 INFO [__main__.py:197] Selected Tasks: ['demo_mmlu_high_school_geography']\n", "2023-11-29:11:57:40,558 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography']\n",
"2023-11-27:08:17:05,469 INFO [huggingface.py:119] Using device 'cuda'\n", "2023-11-29:11:57:40,559 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
"Downloading builder script: 100% 5.84k/5.84k [00:00<00:00, 20.6MB/s]\n", "2023-11-29:11:57:40,589 INFO [huggingface.py:120] Using device 'cuda'\n",
"Downloading metadata: 100% 106k/106k [00:00<00:00, 1.74MB/s]\n", "Downloading builder script: 100% 5.84k/5.84k [00:00<00:00, 17.7MB/s]\n",
"Downloading readme: 100% 39.7k/39.7k [00:00<00:00, 7.61MB/s]\n", "Downloading metadata: 100% 106k/106k [00:00<00:00, 892kB/s] \n",
"Downloading data: 100% 166M/166M [00:01<00:00, 88.8MB/s]\n", "Downloading readme: 100% 39.7k/39.7k [00:00<00:00, 631kB/s]\n",
"Generating auxiliary_train split: 100% 99842/99842 [00:08<00:00, 12231.28 examples/s]\n", "Downloading data: 100% 166M/166M [00:01<00:00, 89.0MB/s]\n",
"Generating test split: 100% 198/198 [00:00<00:00, 1271.62 examples/s]\n", "Generating auxiliary_train split: 100% 99842/99842 [00:07<00:00, 12536.83 examples/s]\n",
"Generating validation split: 100% 22/22 [00:00<00:00, 2947.88 examples/s]\n", "Generating test split: 100% 198/198 [00:00<00:00, 1439.20 examples/s]\n",
"Generating dev split: 100% 5/5 [00:00<00:00, 38.04 examples/s]\n", "Generating validation split: 100% 22/22 [00:00<00:00, 4181.76 examples/s]\n",
"2023-11-27:08:17:40,164 INFO [task.py:353] Building contexts for task on rank 0...\n", "Generating dev split: 100% 5/5 [00:00<00:00, 36.25 examples/s]\n",
"2023-11-27:08:17:40,186 INFO [evaluator.py:290] Running loglikelihood requests\n", "2023-11-29:11:58:09,798 INFO [task.py:355] Building contexts for task on rank 0...\n",
"100% 40/40 [00:05<00:00, 7.74it/s]\n", "2023-11-29:11:58:09,822 INFO [evaluator.py:319] Running loglikelihood requests\n",
"100% 40/40 [00:05<00:00, 7.86it/s]\n",
"fatal: not a git repository (or any of the parent directories): .git\n", "fatal: not a git repository (or any of the parent directories): .git\n",
"hf (pretrained=EleutherAI/pythia-2.8b), limit: 10.0, num_fewshot: None, batch_size: 1\n", "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
"| Tasks |Version|Filter| Metric |Value| |Stderr|\n", "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n",
"|-------------------------------|-------|------|--------|----:|---|-----:|\n", "|-------------------------------|-------|------|-----:|--------|----:|---|-----:|\n",
"|demo_mmlu_high_school_geography|Yaml |none |acc | 0.3|± |0.1528|\n", "|demo_mmlu_high_school_geography|Yaml |none | 0|acc | 0.3|± |0.1528|\n",
"| | |none |acc_norm| 0.3|± |0.1528|\n", "| | |none | 0|acc_norm| 0.3|± |0.1528|\n",
"\n" "\n"
] ]
} }
...@@ -572,7 +573,7 @@ ...@@ -572,7 +573,7 @@
"source": [ "source": [
"We could also evaluate this task in a different way. For example, instead of observing the loglikelihood of the letters, we can instead evaluate on the choices themselves as the continuation. This is done by simply changing `doc_to_choice` from a list of letters to the corresponding `choices` field from the HF dataset. We write `\"{{choices}}\"` so that the string field is interpreted as jinja string that acquires the list from the HF dataset directly.\n", "We could also evaluate this task in a different way. For example, instead of observing the loglikelihood of the letters, we can instead evaluate on the choices themselves as the continuation. This is done by simply changing `doc_to_choice` from a list of letters to the corresponding `choices` field from the HF dataset. We write `\"{{choices}}\"` so that the string field is interpreted as jinja string that acquires the list from the HF dataset directly.\n",
"\n", "\n",
"Another convinient feature here is since we're only modifying the `doc_to_choice` and the rest of config is the same as the task above, we can use the above configuration as a template by using `include: mmlu_high_school_geography.yaml` to load the config from that file. We'll need to add a unique task name as to not collide with the existing yaml config we're including. For this case we'll simply name this one `mmlu_high_school_geography_continuation`. `doc_to_text` is added here just for sake of clarity." "Another convenient feature here is since we're only modifying the `doc_to_choice` and the rest of config is the same as the task above, we can use the above configuration as a template by using `include: mmlu_high_school_geography.yaml` to load the config from that file. We'll need to add a unique task name as to not collide with the existing yaml config we're including. For this case we'll simply name this one `mmlu_high_school_geography_continuation`. `doc_to_text` is added here just for sake of clarity."
] ]
}, },
{ {
...@@ -597,36 +598,33 @@ ...@@ -597,36 +598,33 @@
"cell_type": "code", "cell_type": "code",
"execution_count": 10, "execution_count": 10,
"metadata": { "metadata": {
"colab": { "id": "-_CVnDirdy7j"
"base_uri": "https://localhost:8080/"
},
"id": "NJnMELcFTnRA",
"outputId": "94f26428-d324-4ee2-ac2c-1cbb57fecff1"
}, },
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"2023-11-27:08:17:52,429 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", "2023-11-29:11:58:21,284 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
"2023-11-27 08:17:53.714003: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "2023-11-29 11:58:22.850159: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2023-11-27 08:17:53.714080: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "2023-11-29 11:58:22.850219: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2023-11-27 08:17:53.714119: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "2023-11-29 11:58:22.850254: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2023-11-27 08:17:55.953811: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "2023-11-29 11:58:24.948103: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"2023-11-27:08:17:59,577 INFO [__main__.py:124] Verbosity set to INFO\n", "2023-11-29:11:58:28,460 INFO [__main__.py:132] Verbosity set to INFO\n",
"2023-11-27:08:18:07,654 WARNING [__main__.py:130] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", "2023-11-29:11:58:37,935 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
"2023-11-27:08:18:07,654 INFO [__main__.py:135] Including path: ./\n", "2023-11-29:11:58:37,935 INFO [__main__.py:143] Including path: ./\n",
"2023-11-27:08:18:07,686 INFO [__main__.py:197] Selected Tasks: ['demo_mmlu_high_school_geography_continuation']\n", "2023-11-29:11:58:37,969 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography_continuation']\n",
"2023-11-27:08:18:07,735 INFO [huggingface.py:119] Using device 'cuda'\n", "2023-11-29:11:58:37,972 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
"2023-11-27:08:18:25,609 INFO [task.py:353] Building contexts for task on rank 0...\n", "2023-11-29:11:58:38,008 INFO [huggingface.py:120] Using device 'cuda'\n",
"2023-11-27:08:18:25,628 INFO [evaluator.py:290] Running loglikelihood requests\n", "2023-11-29:11:58:59,758 INFO [task.py:355] Building contexts for task on rank 0...\n",
"100% 40/40 [00:05<00:00, 7.68it/s]\n", "2023-11-29:11:58:59,777 INFO [evaluator.py:319] Running loglikelihood requests\n",
"100% 40/40 [00:02<00:00, 16.23it/s]\n",
"fatal: not a git repository (or any of the parent directories): .git\n", "fatal: not a git repository (or any of the parent directories): .git\n",
"hf (pretrained=EleutherAI/pythia-2.8b), limit: 10.0, num_fewshot: None, batch_size: 1\n", "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
"| Tasks |Version|Filter| Metric |Value| |Stderr|\n", "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n",
"|--------------------------------------------|-------|------|--------|----:|---|-----:|\n", "|--------------------------------------------|-------|------|-----:|--------|----:|---|-----:|\n",
"|demo_mmlu_high_school_geography_continuation|Yaml |none |acc | 0.1|± |0.1000|\n", "|demo_mmlu_high_school_geography_continuation|Yaml |none | 0|acc | 0.1|± |0.1000|\n",
"| | |none |acc_norm| 0.2|± |0.1333|\n", "| | |none | 0|acc_norm| 0.2|± |0.1333|\n",
"\n" "\n"
] ]
} }
...@@ -656,12 +654,7 @@ ...@@ -656,12 +654,7 @@
"cell_type": "code", "cell_type": "code",
"execution_count": 11, "execution_count": 11,
"metadata": { "metadata": {
"colab": { "id": "duBDqC6PAdjL"
"base_uri": "https://localhost:8080/",
"height": 17
},
"id": "-3eoXdXzUOjT",
"outputId": "0201ee0a-5cfa-4cda-8187-edfddb205d66"
}, },
"outputs": [ "outputs": [
{ {
...@@ -683,7 +676,7 @@ ...@@ -683,7 +676,7 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"id": "duBDqC6PAdjL" "id": "6p0-KPwAgK5j"
}, },
"source": [ "source": [
"## Closer Look at YAML Fields\n", "## Closer Look at YAML Fields\n",
...@@ -695,7 +688,7 @@ ...@@ -695,7 +688,7 @@
"1. `loglikelihood`: Evaluates the loglikelihood of a continuation\n", "1. `loglikelihood`: Evaluates the loglikelihood of a continuation\n",
"2. `loglikelihood_rolling`\n", "2. `loglikelihood_rolling`\n",
"3. `multiple_choice`: Evaluates loglikelihood among a number of choices predicted by the model.\n", "3. `multiple_choice`: Evaluates loglikelihood among a number of choices predicted by the model.\n",
"4. `greedy_until`: Model outputs greedy generation (can be configured to use beam search and other generation-related paramaters)\n", "4. `greedy_until`: Model outputs greedy generation (can be configured to use beam search and other generation-related parameters)\n",
"\n", "\n",
"The core prompt revolves around 3 fields.\n", "The core prompt revolves around 3 fields.\n",
"1. `doc_to_text`: Denotes the prompt template that will be used as input to the model.\n", "1. `doc_to_text`: Denotes the prompt template that will be used as input to the model.\n",
...@@ -728,32 +721,33 @@ ...@@ -728,32 +721,33 @@
"base_uri": "https://localhost:8080/" "base_uri": "https://localhost:8080/"
}, },
"id": "DYZ5c0JhR1lJ", "id": "DYZ5c0JhR1lJ",
"outputId": "3a9fd1e4-6b3e-46cf-9a9e-ca316f6e1b65" "outputId": "ca945235-fb9e-4f17-8bfa-78e7d6ec1490"
}, },
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"2023-11-27:08:18:37,991 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n", "2023-11-29:11:59:08,312 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
"2023-11-27 08:18:39.666844: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "2023-11-29 11:59:09.348327: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2023-11-27 08:18:39.666894: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "2023-11-29 11:59:09.348387: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2023-11-27 08:18:39.666949: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "2023-11-29 11:59:09.348421: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2023-11-27 08:18:41.101009: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "2023-11-29 11:59:10.573752: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"2023-11-27:08:18:44,452 INFO [__main__.py:124] Verbosity set to INFO\n", "2023-11-29:11:59:14,044 INFO [__main__.py:132] Verbosity set to INFO\n",
"2023-11-27:08:18:54,278 WARNING [__main__.py:130] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n", "2023-11-29:11:59:23,654 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
"2023-11-27:08:18:54,279 INFO [__main__.py:135] Including path: ./\n", "2023-11-29:11:59:23,654 INFO [__main__.py:143] Including path: ./\n",
"2023-11-27:08:18:54,303 INFO [__main__.py:197] Selected Tasks: ['demo_mmlu_high_school_geography_function_prompt']\n", "2023-11-29:11:59:23,678 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography_function_prompt']\n",
"2023-11-27:08:18:54,333 INFO [huggingface.py:119] Using device 'cuda'\n", "2023-11-29:11:59:23,679 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
"2023-11-27:08:19:12,078 INFO [task.py:353] Building contexts for task on rank 0...\n", "2023-11-29:11:59:23,708 INFO [huggingface.py:120] Using device 'cuda'\n",
"2023-11-27:08:19:12,084 INFO [evaluator.py:290] Running loglikelihood requests\n", "2023-11-29:11:59:44,516 INFO [task.py:355] Building contexts for task on rank 0...\n",
"100% 40/40 [00:02<00:00, 16.47it/s]\n", "2023-11-29:11:59:44,524 INFO [evaluator.py:319] Running loglikelihood requests\n",
"100% 40/40 [00:02<00:00, 15.41it/s]\n",
"fatal: not a git repository (or any of the parent directories): .git\n", "fatal: not a git repository (or any of the parent directories): .git\n",
"hf (pretrained=EleutherAI/pythia-2.8b), limit: 10.0, num_fewshot: None, batch_size: 1\n", "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
"| Tasks |Version|Filter| Metric |Value| |Stderr|\n", "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n",
"|-----------------------------------------------|-------|------|--------|----:|---|-----:|\n", "|-----------------------------------------------|-------|------|-----:|--------|----:|---|-----:|\n",
"|demo_mmlu_high_school_geography_function_prompt|Yaml |none |acc | 0.1|± |0.1000|\n", "|demo_mmlu_high_school_geography_function_prompt|Yaml |none | 0|acc | 0.1|± |0.1000|\n",
"| | |none |acc_norm| 0.2|± |0.1333|\n", "| | |none | 0|acc_norm| 0.2|± |0.1333|\n",
"\n" "\n"
] ]
} }
...@@ -810,7 +804,38 @@ ...@@ -810,7 +804,38 @@
}, },
"widgets": { "widgets": {
"application/vnd.jupyter.widget-state+json": { "application/vnd.jupyter.widget-state+json": {
"11451d297d3d4037b9fd6010d2097843": { "46f521b73fd943c081c648fd873ebc0a": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"48763b6233374554ae76035c0483066f": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"4986a21eb560448fa79f4b25cde48951": {
"model_module": "@jupyter-widgets/base", "model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0", "model_module_version": "1.2.0",
"model_name": "LayoutModel", "model_name": "LayoutModel",
...@@ -862,7 +887,7 @@ ...@@ -862,7 +887,7 @@
"width": null "width": null
} }
}, },
"164724b8d45a4adfbd44805cc16ca032": { "6b2d90209ec14230b3d58a74ac9b83bf": {
"model_module": "@jupyter-widgets/base", "model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0", "model_module_version": "1.2.0",
"model_name": "LayoutModel", "model_name": "LayoutModel",
...@@ -914,119 +939,7 @@ ...@@ -914,119 +939,7 @@
"width": null "width": null
} }
}, },
"18171a183d1e4dd5a4f6670e517dcc1d": { "7c5689bc13684db8a22681f41863dddd": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"214bcd0301bd423b8ed691cace119137": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_164724b8d45a4adfbd44805cc16ca032",
"placeholder": "​",
"style": "IPY_MODEL_18171a183d1e4dd5a4f6670e517dcc1d",
"value": "Downloading builder script: 100%"
}
},
"412f24cc52494ca28151e4857361b88c": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"460777325dd64ecaa929021ffc9d5f3a": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_8c4e29d728264a5caf38dc3781a017a8",
"max": 5669,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_52c8d0cf3e7f48959883934856331286",
"value": 5669
}
},
"4a50bd7b62d94f40a13a2cb7fbc0ba4b": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_8f62151425e345128a110b6aff76af71",
"placeholder": "​",
"style": "IPY_MODEL_412f24cc52494ca28151e4857361b88c",
"value": " 5.67k/5.67k [00:00&lt;00:00, 335kB/s]"
}
},
"52c8d0cf3e7f48959883934856331286": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"8c4e29d728264a5caf38dc3781a017a8": {
"model_module": "@jupyter-widgets/base", "model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0", "model_module_version": "1.2.0",
"model_name": "LayoutModel", "model_name": "LayoutModel",
...@@ -1078,7 +991,29 @@ ...@@ -1078,7 +991,29 @@
"width": null "width": null
} }
}, },
"8f62151425e345128a110b6aff76af71": { "a1d3a8aa016544a78e8821c8f6199e06": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_f61ed33fad754146bdd2ac9db1ba1c48",
"IPY_MODEL_bfa0af6aeff344c6845e1080a878e92e",
"IPY_MODEL_fd1ad9e0367d4004aae853b91c3a7617"
],
"layout": "IPY_MODEL_6b2d90209ec14230b3d58a74ac9b83bf"
}
},
"a73f357065d34d7baf0453ae4a8d75e2": {
"model_module": "@jupyter-widgets/base", "model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0", "model_module_version": "1.2.0",
"model_name": "LayoutModel", "model_name": "LayoutModel",
...@@ -1130,26 +1065,85 @@ ...@@ -1130,26 +1065,85 @@
"width": null "width": null
} }
}, },
"ff5a90d6c8884a4dbf5b70453e45bcad": { "aed3acd2f2d74003b44079c333a0698e": {
"model_module": "@jupyter-widgets/controls", "model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0", "model_module_version": "1.5.0",
"model_name": "HBoxModel", "model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"bfa0af6aeff344c6845e1080a878e92e": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": { "state": {
"_dom_classes": [], "_dom_classes": [],
"_model_module": "@jupyter-widgets/controls", "_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0", "_model_module_version": "1.5.0",
"_model_name": "HBoxModel", "_model_name": "FloatProgressModel",
"_view_count": null, "_view_count": null,
"_view_module": "@jupyter-widgets/controls", "_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0", "_view_module_version": "1.5.0",
"_view_name": "HBoxView", "_view_name": "ProgressView",
"box_style": "", "bar_style": "success",
"children": [ "description": "",
"IPY_MODEL_214bcd0301bd423b8ed691cace119137", "description_tooltip": null,
"IPY_MODEL_460777325dd64ecaa929021ffc9d5f3a", "layout": "IPY_MODEL_7c5689bc13684db8a22681f41863dddd",
"IPY_MODEL_4a50bd7b62d94f40a13a2cb7fbc0ba4b" "max": 5669,
], "min": 0,
"layout": "IPY_MODEL_11451d297d3d4037b9fd6010d2097843" "orientation": "horizontal",
"style": "IPY_MODEL_48763b6233374554ae76035c0483066f",
"value": 5669
}
},
"f61ed33fad754146bdd2ac9db1ba1c48": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_a73f357065d34d7baf0453ae4a8d75e2",
"placeholder": "​",
"style": "IPY_MODEL_46f521b73fd943c081c648fd873ebc0a",
"value": "Downloading builder script: 100%"
}
},
"fd1ad9e0367d4004aae853b91c3a7617": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_4986a21eb560448fa79f4b25cde48951",
"placeholder": "​",
"style": "IPY_MODEL_aed3acd2f2d74003b44079c333a0698e",
"value": " 5.67k/5.67k [00:00&lt;00:00, 205kB/s]"
} }
} }
} }
......
...@@ -6,4 +6,3 @@ mor ...@@ -6,4 +6,3 @@ mor
te te
ond ond
extraversion extraversion
nD
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment