Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
lm-evaluation-harness
Commits
dc1f2539
Commit
dc1f2539
authored
Jun 25, 2023
by
fromSun2Moon
Browse files
legal judgement prediction updated: polyglot-ko
parent
8e041322
Changes
4
Show whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
210 additions
and
4 deletions
+210
-4
lm_eval/tasks/__init__.py
lm_eval/tasks/__init__.py
+3
-1
lm_eval/tasks/legal_test.py
lm_eval/tasks/legal_test.py
+193
-2
main.py
main.py
+1
-1
run2.sh
run2.sh
+13
-0
No files found.
lm_eval/tasks/__init__.py
View file @
dc1f2539
...
@@ -346,7 +346,9 @@ TASK_REGISTRY = {
...
@@ -346,7 +346,9 @@ TASK_REGISTRY = {
"kohatespeech"
:
kohatespeech
.
HateSpeech
,
"kohatespeech"
:
kohatespeech
.
HateSpeech
,
"kohatespeech_gen_bias"
:
kohatespeech
.
GenderBias
,
"kohatespeech_gen_bias"
:
kohatespeech
.
GenderBias
,
"kohatespeech_apeach"
:
kohatespeech
.
Apeach
,
"kohatespeech_apeach"
:
kohatespeech
.
Apeach
,
"kolegal_legalcase"
:
legal_test
.
LegalCasename
,
"kolegal_legalcase"
:
legal_test
.
LegalBinary
,
"kolegal_civilcase"
:
legal_test
.
LJPCivil
,
"kolegal_criminalcase"
:
legal_test
.
LJPCriminal
,
**
xcopa
.
construct_tasks
(),
**
xcopa
.
construct_tasks
(),
**
bigbench
.
create_all_tasks
(),
**
bigbench
.
create_all_tasks
(),
**
xstorycloze
.
create_all_tasks
(),
**
xstorycloze
.
create_all_tasks
(),
...
...
lm_eval/tasks/legal_test.py
View file @
dc1f2539
...
@@ -3,9 +3,9 @@ Korean legal AI datasets, LBox OPEN
...
@@ -3,9 +3,9 @@ Korean legal AI datasets, LBox OPEN
Multi-task on Legal corpus
Multi-task on Legal corpus
https://arxiv.org/pdf/2206.05224.pdf
https://arxiv.org/pdf/2206.05224.pdf
"""
"""
import
numpy
as
np
import
numpy
as
np
from
lm_eval.base
import
Task
,
MultipleChoiceTask
,
rf
from
lm_eval.base
import
Task
,
MultipleChoiceTask
,
rf
from
lm_eval.metrics
import
bleu
,
chrf
,
ter
from
lm_eval.metrics
import
macro_f1_score
,
mean
,
matthews_corrcoef
,
f1_score
,
yesno
from
lm_eval.metrics
import
macro_f1_score
,
mean
,
matthews_corrcoef
,
f1_score
,
yesno
from
lm_eval.utils
import
general_detokenize
from
lm_eval.utils
import
general_detokenize
...
@@ -18,7 +18,8 @@ _CITATION ="""
...
@@ -18,7 +18,8 @@ _CITATION ="""
}
}
"""
"""
class
LegalCasename
(
Task
):
class
LegalBinary
(
Task
):
""" Predict civil(민사) or criminal(형사) case"""
VERSION
=
0
VERSION
=
0
DATASET_PATH
=
"lbox/lbox_open"
DATASET_PATH
=
"lbox/lbox_open"
DATASET_NAME
=
"casename_classification"
DATASET_NAME
=
"casename_classification"
...
@@ -75,3 +76,193 @@ class LegalCasename(Task):
...
@@ -75,3 +76,193 @@ class LegalCasename(Task):
"macro_f1"
:
macro_f1_score
"macro_f1"
:
macro_f1_score
}
}
class LJPCivil(MultipleChoiceTask):
    """Legal judgement prediction for Korean civil cases (LBox OPEN, `ljp_civil`).

    Each document's case facts (``doc['facts']``) form the prompt and the
    model must pick the case name out of four fixed candidate claims.
    Scored with accuracy and macro-F1.
    """

    VERSION = 0
    DATASET_PATH = "lbox/lbox_open"
    DATASET_NAME = "ljp_civil"
    # Single source of truth for the label set. The position of each case
    # name defines the integer id used when scoring in process_results;
    # previously this list was duplicated in proces_label and _process_doc
    # and could silently drift apart.
    CHOICES = ['구상금', '대여금', '부당이득금', '손해배상(기)']

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Cache the processed training split; the harness may iterate it
        # repeatedly when sampling few-shot examples.
        if self._training_docs is None:
            self._training_docs = list(map(self._process_doc, self.dataset["train"]))
        return self._training_docs

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def test_docs(self):
        return map(self._process_doc, self.dataset["test"])

    def doc_to_text(self, doc):
        return doc["query"]

    def doc_to_target(self, doc):
        # Leading space separates the continuation from the prompt.
        return " {}".format(doc['gold'])

    def proces_label(self, doc):
        # Map the gold case name to its choice index. (Method name kept
        # as-is — "proces" sic — for backward compatibility with callers.)
        # Raises ValueError if the gold label is outside the known set,
        # which would indicate a dataset/label-set mismatch.
        return self.CHOICES.index(doc['gold'])

    def _process_doc(self, doc):
        """Convert a raw dataset row into the harness document format."""
        return {
            "query": "{}".format(doc['facts']),
            # Fresh list per doc so downstream mutation cannot corrupt
            # the shared class-level label set.
            "choices": list(self.CHOICES),
            "gold": doc['casename'],
        }

    def process_results(self, doc, results):
        # results holds one loglikelihood per choice; argmax = prediction.
        pred = np.argmax(results)
        gold = self.proces_label(doc)
        return {
            "acc": pred == gold,
            # macro_f1_score aggregates (gold, pred) pairs across docs.
            "macro_f1": (gold, pred)
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "macro_f1": True
        }

    def aggregation(self):
        return {
            "acc": mean,
            "macro_f1": macro_f1_score
        }
class LJPCivil(MultipleChoiceTask):
    """Multiple-choice case-name prediction for Korean civil cases (LBox OPEN)."""

    VERSION = 0
    DATASET_PATH = "lbox/lbox_open"
    DATASET_NAME = "ljp_civil"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Build (and memoize) the processed training split on first access.
        if self._training_docs is None:
            self._training_docs = [self._process_doc(row) for row in self.dataset["train"]]
        return self._training_docs

    def validation_docs(self):
        return (self._process_doc(row) for row in self.dataset["validation"])

    def test_docs(self):
        return (self._process_doc(row) for row in self.dataset["test"])

    def doc_to_text(self, doc):
        return doc["query"]

    def doc_to_target(self, doc):
        return " {}".format(doc['gold'])

    def proces_label(self, doc):
        # Translate the gold case-name string into its fixed choice index.
        label_ids = {'구상금': 0, '대여금': 1, '부당이득금': 2, '손해배상(기)': 3}
        return label_ids[doc['gold']]

    def _process_doc(self, doc):
        # Normalize a raw dataset row into the harness document schema.
        return {
            "query": "{}".format(doc['facts']),
            "choices": ['구상금', '대여금', '부당이득금', '손해배상(기)'],
            "gold": doc['casename'],
        }

    def process_results(self, doc, results):
        gold_idx = self.proces_label(doc)
        pred_idx = np.argmax(results)
        return {"acc": pred_idx == gold_idx, "macro_f1": (gold_idx, pred_idx)}

    def higher_is_better(self):
        return dict(acc=True, macro_f1=True)

    def aggregation(self):
        return dict(acc=mean, macro_f1=macro_f1_score)
class LJPCriminal(MultipleChoiceTask):
    """Legal judgement prediction for Korean criminal cases (LBox OPEN, `ljp_criminal`).

    Each document's case facts (``doc['facts']``) form the prompt and the
    model must pick the case name out of seven fixed criminal charges.
    Scored with accuracy and macro-F1.
    """

    VERSION = 0
    DATASET_PATH = "lbox/lbox_open"
    DATASET_NAME = "ljp_criminal"
    # Single source of truth for the label set. The position of each case
    # name defines the integer id used when scoring in process_results;
    # previously this list was duplicated in proces_label and _process_doc
    # (with stray line-continuation backslashes) and could drift apart.
    CHOICES = [
        '강제추행',
        '공무집행방해',
        '교통사고처리특례법위반(치상)',
        '도로교통법위반(음주운전)',
        '사기',
        '상해',
        '폭행',
    ]

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Cache the processed training split; the harness may iterate it
        # repeatedly when sampling few-shot examples.
        if self._training_docs is None:
            self._training_docs = list(map(self._process_doc, self.dataset["train"]))
        return self._training_docs

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def test_docs(self):
        return map(self._process_doc, self.dataset["test"])

    def doc_to_text(self, doc):
        return doc["query"]

    def doc_to_target(self, doc):
        # Leading space separates the continuation from the prompt.
        return " {}".format(doc['gold'])

    def proces_label(self, doc):
        # Map the gold case name to its choice index. (Method name kept
        # as-is — "proces" sic — for consistency with LJPCivil.)
        # Raises ValueError if the gold label is outside the known set,
        # which would indicate a dataset/label-set mismatch.
        return self.CHOICES.index(doc['gold'])

    def _process_doc(self, doc):
        """Convert a raw dataset row into the harness document format."""
        return {
            "query": "{}".format(doc['facts']),
            # Fresh list per doc so downstream mutation cannot corrupt
            # the shared class-level label set.
            "choices": list(self.CHOICES),
            "gold": doc['casename'],
        }

    def process_results(self, doc, results):
        # results holds one loglikelihood per choice; argmax = prediction.
        pred = np.argmax(results)
        gold = self.proces_label(doc)
        return {
            "acc": pred == gold,
            # macro_f1_score aggregates (gold, pred) pairs across docs.
            "macro_f1": (gold, pred)
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "macro_f1": True
        }

    def aggregation(self):
        return {
            "acc": mean,
            "macro_f1": macro_f1_score
        }
main.py
View file @
dc1f2539
run2.sh
0 → 100644
View file @
dc1f2539
# Evaluate EleutherAI/polyglot-ko-1.3B on the Korean legal criminal-case
# task at 0/5/10-shot. The command was previously copy-pasted three times
# differing only in --num_fewshot; a loop keeps the invocation in one place.
for shots in 0 5 10; do
    python3 -W ignore main.py --model gpt2 \
        --model_args pretrained=EleutherAI/polyglot-ko-1.3B \
        --task kolegal_criminalcase --num_fewshot "$shots"
done

# python3 -W ignore main.py --model gpt2 --model_args pretrained=EleutherAI/polyglot-ko-1.3B \
#     --task kolegal_legalcase --num_fewshot 0
# python3 -W ignore main.py --model gpt2 --model_args pretrained=EleutherAI/polyglot-ko-1.3B \
#     --task kolegal_legalcase --num_fewshot 5
# python3 -W ignore main.py --model gpt2 --model_args pretrained=EleutherAI/polyglot-ko-1.3B \
#     --task kolegal_legalcase --num_fewshot 10
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment