Unverified Commit 48e9e719 authored by yhyang201's avatar yhyang201 Committed by GitHub
Browse files

Add --max-new-tokens CLI flag for MMMU evaluation (#11217)

parent 31b49c0b
...@@ -36,6 +36,7 @@ class EvalArgs: ...@@ -36,6 +36,7 @@ class EvalArgs:
profile: bool = False profile: bool = False
profile_number: int = 5 profile_number: int = 5
concurrency: int = 1 concurrency: int = 1
max_new_tokens: int = 30
response_answer_regex: str = "(.*)" response_answer_regex: str = "(.*)"
lora_path: Optional[str] = None lora_path: Optional[str] = None
...@@ -94,6 +95,12 @@ class EvalArgs: ...@@ -94,6 +95,12 @@ class EvalArgs:
default=EvalArgs.concurrency, default=EvalArgs.concurrency,
help="Number of concurrent requests to make during evaluation. Default is 1, which means no concurrency.", help="Number of concurrent requests to make during evaluation. Default is 1, which means no concurrency.",
) )
parser.add_argument(
"--max-new-tokens",
type=int,
default=EvalArgs.max_new_tokens,
help="Maximum number of new tokens to generate per sample.",
)
parser.add_argument( parser.add_argument(
"--response-answer-regex", "--response-answer-regex",
type=str, type=str,
...@@ -234,7 +241,7 @@ def prepare_samples(eval_args: EvalArgs): ...@@ -234,7 +241,7 @@ def prepare_samples(eval_args: EvalArgs):
def get_sampling_params(eval_args): def get_sampling_params(eval_args):
max_new_tokens = 30 max_new_tokens = eval_args.max_new_tokens
temperature = 0.001 temperature = 0.001
extra_request_body = {} extra_request_body = {}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment