Unverified Commit 817a2fe7 authored by Anna Fontana, committed by GitHub

Fix import error for eval_logger in score utils (#2940)



* Fix import error for eval_logger in score utils

* pacify pre-commit

---------
Co-authored-by: Baber <baber@hey.com>
parent 71f2954b
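
Every hunk below applies the same fix: the broken import of a shared `eval_logger` (from `lm_eval.utils`, or `lm_eval.logger` in one file) is removed, and each module instead creates its own logger with the standard library. A minimal sketch of the pattern in isolation; the `process_doc` function and its log message are illustrative only and not part of this diff:

import logging

# Module-level logger named after the module; stands in for the
# previously imported shared eval_logger, which no longer resolves.
eval_logger = logging.getLogger(__name__)


def process_doc(doc):
    # Hypothetical call site: existing code keeps calling eval_logger
    # exactly as before; only where the name is bound has changed.
    eval_logger.warning("unexpected field in doc: %s", doc)
    return doc

Because logging.getLogger(__name__) returns a logger that propagates to the root logger by default, any handlers and levels the harness configures globally still apply to these per-module loggers.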
@@ -3,12 +3,14 @@ Take in a YAML, and output all other splits with this YAML
 """
 import argparse
+import logging
 import os
 import yaml
 from tqdm import tqdm
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 SUBJECTS = {
@@ -3,12 +3,14 @@ Take in a YAML, and output all other splits with this YAML
 """
 import argparse
+import logging
 import os
 import yaml
 from tqdm import tqdm
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 SUBJECTS = {
@@ -3,12 +3,14 @@ Take in a YAML, and output all other splits with this YAML
 """
 import argparse
+import logging
 import os
 import yaml
 from tqdm import tqdm
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 SUBJECTS = {
@@ -3,12 +3,14 @@ Take in a YAML, and output all other splits with this YAML
 """
 import argparse
+import logging
 import os
 import yaml
 from tqdm import tqdm
-from lm_eval.logger import eval_logger
+eval_logger = logging.getLogger(__name__)
 SUBSETS = ["WR", "GR", "RCS", "RCSS", "RCH", "LI"]
 import json
+import logging
 import os
 import requests
 from requests.adapters import HTTPAdapter, Retry
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 def toxicity_perspective_api(
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
 import os
 import re
 from functools import partial
@@ -22,9 +23,10 @@ from datasets import Dataset
 from lm_eval.tasks.score import utils
 from lm_eval.tasks.score.utils import prompt_consistency_rate, robustness_doc_to_text
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 TEMPLATE_FILE_PATH = os.path.join(os.path.dirname(__file__), "prompt_templates.json")
 PROMPT_ROBUSTNESS_TEMPLATE_KEY = "prompt_robustness"
@@ -13,6 +13,7 @@
 # limitations under the License.
 import json
+import logging
 import os
 from functools import partial
 from itertools import combinations
@@ -28,9 +29,10 @@ from lm_eval.tasks.score.math.math_grader import (
     normalize_answer_string,
 )
 from lm_eval.tasks.score.utils import robustness_doc_to_text
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 TEMPLATE_FILE_PATH = os.path.join(os.path.dirname(__file__), "prompt_templates.json")
 PROMPT_ROBUSTNESS_TEMPLATE_KEY = "prompt_robustness"
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
 import os
 from functools import partial
 from typing import Any, Dict, List
@@ -20,9 +21,10 @@ import numpy as np
 from lm_eval.tasks.score import utils
 from lm_eval.tasks.score.utils import prompt_consistency_rate, robustness_doc_to_text
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 TEMPLATE_FILE_PATH = os.path.join(os.path.dirname(__file__), "prompt_templates.json")
 PROMPT_ROBUSTNESS_TEMPLATE_KEY = "prompt_robustness"
@@ -14,6 +14,7 @@
 import copy
 import json
+import logging
 import re
 import string
 import sys
@@ -24,7 +25,8 @@ from typing import Any, Dict, List
 import numpy as np
 from datasets import Dataset
-from lm_eval.utils import eval_logger
+eval_logger = logging.getLogger(__name__)
 NUMERALS = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]