gaoqiong / lm-evaluation-harness
Commit 3ba4e897, authored Jul 01, 2025 by Baber

update type hints

parent 9b192374
Showing 1 changed file with 7 additions and 7 deletions.

lm_eval/api/metrics.py (+7, -7)
@@ -21,36 +21,36 @@ def bypass_agg(arr):
 @register_aggregation("nanmean")
-def nanmean(arr):
+def nanmean(arr: list[float]) -> float:
     if len(arr) == 0 or all(np.isnan(arr)):
         return np.nan
     return np.nanmean(arr)

 @register_aggregation("mean")
-def mean(arr):
+def mean(arr: list[float]) -> float:
     return sum(arr) / len(arr)

 @register_aggregation("median")
-def median(arr):
+def median(arr: list[float]) -> float:
     return arr[len(arr) // 2]

 # Certain metrics must be calculated across all documents in a benchmark.
 # We use them as aggregation metrics, paired with no-op passthrough metric fns.
 @register_aggregation("perplexity")
-def perplexity(items):
+def perplexity(items: list[float]) -> float:
     return math.exp(-mean(items))

 @register_aggregation("weighted_perplexity")
-def weighted_perplexity(items):
+def weighted_perplexity(items: list[tuple[float, float]]) -> float:
     return math.exp(-weighted_mean(items))

 @register_aggregation("bits_per_byte")
-def bits_per_byte(items):
+def bits_per_byte(items: list[tuple[float, float]]) -> float:
     return -weighted_mean(items) / math.log(2)
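
For orientation, here is a minimal sketch of what a couple of these aggregations compute, using made-up inputs (the score values are illustrative only; register_aggregation is the harness decorator that exposes each function under its string name):

import math

import numpy as np

# Hypothetical per-document scores; NaN marks documents the metric skipped.
scores = [0.5, float("nan"), 0.25]

# nanmean ignores the NaN entry: mean of [0.5, 0.25] == 0.375
print(np.nanmean(scores))

# perplexity aggregates per-token log-likelihoods as exp(-mean(...)):
# a lower average log-likelihood yields a higher perplexity.
loglikelihoods = [-1.2, -0.7, -2.3]
print(math.exp(-sum(loglikelihoods) / len(loglikelihoods)))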
@@ -413,7 +413,7 @@ def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
     return max(scores_for_ground_truths)

-def weighted_mean(items):
+def weighted_mean(items: List[tuple[float, float]]) -> float:
     a, b = zip(*items)
     return sum(a) / sum(b)
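
As an illustration of the new weighted_mean signature: each item pairs a numerator with a weight, and weighted_perplexity / bits_per_byte feed it (log-likelihood, token-or-byte count) pairs. The numbers below are invented for the example:

import math

# Hypothetical (loglikelihood, count) pairs, one per document.
items = [(-10.0, 5.0), (-6.0, 3.0)]

# weighted_mean: sum of numerators over sum of weights.
a, b = zip(*items)
wm = sum(a) / sum(b)  # -16.0 / 8.0 == -2.0

print(math.exp(-wm))      # weighted perplexity: e**2 ~= 7.389
print(-wm / math.log(2))  # bits per byte: 2 / ln(2) ~= 2.885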