OpenDAS / opencompass · Commits

Commit fb111087, authored Jul 04, 2023 by yingfhu
Parent: 7d346000

[Feat] support opencompass

Changes: 81 files; this page shows 20 changed files with 380 additions and 0 deletions (+380 / -0)
configs/datasets/narrativeqa/narrativeqa_gen.py              +4  -0
configs/datasets/nq/nq_gen_c00b89.py                         +29 -0
configs/datasets/piqa/piqa_gen.py                            +4  -0
configs/datasets/piqa/piqa_ppl_788dbe.py                     +31 -0
configs/datasets/qabench/qabench_gen.py                      +4  -0
configs/datasets/race/race_gen_12de48.py                     +46 -0
configs/datasets/race/race_gen_d18b89.py                     +40 -0
configs/datasets/race/race_ppl.py                            +4  -0
configs/datasets/realtoxicprompts/realtoxicprompts_gen.py    +4  -0
configs/datasets/safety/safety_gen.py                        +4  -0
configs/datasets/siqa/siqa_gen_a3c714.py                     +42 -0
configs/datasets/siqa/siqa_ppl.py                            +4  -0
configs/datasets/storycloze/storycloze_ppl.py                +4  -0
configs/datasets/storycloze/storycloze_ppl_7f4c64.py         +36 -0
configs/datasets/summedits/summedits_gen.py                  +4  -0
configs/datasets/summedits/summedits_gen_4f35b5.py           +37 -0
configs/datasets/triviaqarc/triviaqarc_gen_a02306.py         +30 -0
configs/models/classic/tigerbot-7b-sft.py                    +29 -0
configs/summarizers/groups/bbh.py                            +6  -0
docs/en/_templates/404.html                                  +18 -0
configs/datasets/narrativeqa/narrativeqa_gen.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .narrativeqa_gen_5786a7 import narrativeqa_datasets  # noqa: F401, F403
configs/datasets/nq/nq_gen_c00b89.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import NaturalQuestionDataset, NQEvaluator

nq_reader_cfg = dict(
    input_columns=['question'], output_column='answer', train_split='test')

nq_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Question: {question}?\nAnswer: '),
            ],
        )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role="BOT")

nq_datasets = [
    dict(
        type=NaturalQuestionDataset,
        abbr='nq',
        path='./data/nq/',
        reader_cfg=nq_reader_cfg,
        infer_cfg=nq_infer_cfg,
        eval_cfg=nq_eval_cfg)
]
configs/datasets/piqa/piqa_gen.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .piqa_gen_8287ae import piqa_datasets  # noqa: F401, F403
configs/datasets/piqa/piqa_ppl_788dbe.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset

piqa_reader_cfg = dict(
    input_columns=['goal', 'sol1', 'sol2'],
    output_column='label',
    test_split='validation')

piqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: 'The following makes sense:\nQ: {goal}\nA: {sol1}\n',
            1: 'The following makes sense:\nQ: {goal}\nA: {sol2}\n'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

piqa_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

piqa_datasets = [
    dict(
        type=HFDataset,
        path='piqa',
        reader_cfg=piqa_reader_cfg,
        infer_cfg=piqa_infer_cfg,
        eval_cfg=piqa_eval_cfg)
]
configs/datasets/qabench/qabench_gen.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .qabench_gen_0d5967 import qabench_datasets  # noqa: F401, F403
configs/datasets/race/race_gen_12de48.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import RaceDataset

race_reader_cfg = dict(
    input_columns=['article', 'question', 'A', 'B', 'C', 'D'],
    output_column='answer')

race_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="Read the article, and answer the question by replying A, B, C or D.\n\nArticle:\n{article}\n\nQ: {question}\n\nA. {A}\nB. {B}\nC. {C}\nD. {D}"
            ),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

race_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_postprocessor=dict(type='first-capital'),
    pred_role='BOT')

race_datasets = [
    dict(
        type=RaceDataset,
        abbr='race-middle',
        path='race',
        name='middle',
        reader_cfg=race_reader_cfg,
        infer_cfg=race_infer_cfg,
        eval_cfg=race_eval_cfg),
    dict(
        type=RaceDataset,
        abbr='race-high',
        path='race',
        name='high',
        reader_cfg=race_reader_cfg,
        infer_cfg=race_infer_cfg,
        eval_cfg=race_eval_cfg)
]
configs/datasets/race/race_gen_d18b89.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import RaceDataset

race_reader_cfg = dict(
    input_columns=['article', 'question', 'A', 'B', 'C', 'D'],
    output_column='answer')

race_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template='Read the article, and answer the question by replying A, B, C or D.\n\n{article}\n\nQ: {question}\n\nA. {A}\nB. {B}\nC. {C}\nD. {D}'
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

race_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_postprocessor=dict(type='first-capital'))

race_datasets = [
    dict(
        type=RaceDataset,
        abbr='race-middle',
        path='race',
        name='middle',
        reader_cfg=race_reader_cfg,
        infer_cfg=race_infer_cfg,
        eval_cfg=race_eval_cfg),
    dict(
        type=RaceDataset,
        abbr='race-high',
        path='race',
        name='high',
        reader_cfg=race_reader_cfg,
        infer_cfg=race_infer_cfg,
        eval_cfg=race_eval_cfg)
]
configs/datasets/race/race_ppl.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .race_ppl_04e06a import race_datasets  # noqa: F401, F403
configs/datasets/realtoxicprompts/realtoxicprompts_gen.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .realtoxicprompts_gen_3ea730 import realtoxicprompts_datasets  # noqa: F401, F403
configs/datasets/safety/safety_gen.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .safety_gen_c0a5b8 import safety_datasets  # noqa: F401, F403
configs/datasets/siqa/siqa_gen_a3c714.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import siqaDataset_V2

siqa_reader_cfg = dict(
    input_columns=["context", "question", "answerA", "answerB", "answerC"],
    output_column="label",
    test_split="validation")

siqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role="HUMAN",
                    prompt="{context}\nQuestion: {question}\nA. {answerA}\nB. {answerB}\nC. {answerC}\nAnswer:"
                )
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

siqa_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

siqa_datasets = [
    dict(
        abbr="siqa",
        type=siqaDataset_V2,
        path="social_i_qa",
        reader_cfg=siqa_reader_cfg,
        infer_cfg=siqa_infer_cfg,
        eval_cfg=siqa_eval_cfg)
]
configs/datasets/siqa/siqa_ppl.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .siqa_ppl_049da0 import siqa_datasets  # noqa: F401, F403
configs/datasets/storycloze/storycloze_ppl.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .storycloze_ppl_c1912d import storycloze_datasets  # noqa: F401, F403
configs/datasets/storycloze/storycloze_ppl_7f4c64.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import storyclozeDataset

storycloze_reader_cfg = dict(
    input_columns=['context', 'sentence_quiz1', 'sentence_quiz2'],
    output_column='answer_right_ending',
    train_split='test',
    test_split='test')

storycloze_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            1: "{context}{sentence_quiz1}",
            2: "{context}{sentence_quiz2}",
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

storycloze_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

# The original Story Cloze dataset and repo are no longer maintained,
# so the multilingual version of this dataset is used instead.
storycloze_datasets = [
    dict(
        abbr='story_cloze',
        type=storyclozeDataset,
        path='juletxara/xstory_cloze',
        name='en',
        reader_cfg=storycloze_reader_cfg,
        infer_cfg=storycloze_infer_cfg,
        eval_cfg=storycloze_eval_cfg)
]
configs/datasets/summedits/summedits_gen.py  (new file, mode 100644)

from mmengine.config import read_base

with read_base():
    from .summedits_gen_4f35b5 import summedits_datasets  # noqa: F401, F403
configs/datasets/summedits/summedits_gen_4f35b5.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import SummeditsDataset_V2

summedits_reader_cfg = dict(
    input_columns=['doc', 'summary'], output_column='label')

summedits_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt='Document:\n{doc}Summary:\n{summary}\nQuestion:\nIs the summary factually consistent with the document?\nA. Yes\nB. No\nAnswer:'
            ),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

summedits_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type="first-capital"),
)

summedits_datasets = [
    dict(
        abbr='summedits',
        type=SummeditsDataset_V2,
        path='./data/summedits/summedits.jsonl',
        reader_cfg=summedits_reader_cfg,
        infer_cfg=summedits_infer_cfg,
        eval_cfg=summedits_eval_cfg)
]
configs/datasets/triviaqarc/triviaqarc_gen_a02306.py  (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import TriviaQArcDataset, TriviaQAEvaluator

triviaqarc_reader_cfg = dict(
    input_columns=['question', 'evidence'],
    output_column='answer',
    train_split='dev',
    test_split='dev')

triviaqarc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template="{evidence}\nAnswer these questions:\nQ: {question}?\nA:"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(
        type=GenInferencer, max_out_len=50, max_seq_len=8192, batch_size=4))

triviaqarc_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator))

triviaqarc_datasets = [
    dict(
        type=TriviaQArcDataset,
        abbr='triviaqarc',
        path='./data/triviaqa-rc/',
        reader_cfg=triviaqarc_reader_cfg,
        infer_cfg=triviaqarc_infer_cfg,
        eval_cfg=triviaqarc_eval_cfg)
]
configs/models/classic/tigerbot-7b-sft.py  (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='\n\n### Instruction:\n:'),
        dict(role='BOT', begin='\n\n### Response:\n:', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='TigerBot-SFT',
        path="TigerResearch/tigerbot-7b-sft",
        tokenizer_path='TigerResearch/tigerbot-7b-sft',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            revision='0ba4d6fc479bdedd6a3f8d4d3425025c5f501800'),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
configs/summarizers/groups/bbh.py  (new file, mode 100644)

bbh_summary_groups = []

# bbh
_bbh = [
    'temporal_sequences', 'disambiguation_qa', 'date_understanding',
    'tracking_shuffled_objects_three_objects', 'penguins_in_a_table',
    'geometric_shapes', 'snarks', 'ruin_names',
    'tracking_shuffled_objects_seven_objects',
    'tracking_shuffled_objects_five_objects',
    'logical_deduction_three_objects', 'hyperbaton',
    'logical_deduction_five_objects', 'logical_deduction_seven_objects',
    'movie_recommendation', 'salient_translation_error_detection',
    'reasoning_about_colored_objects', 'multistep_arithmetic_two', 'navigate',
    'dyck_languages', 'word_sorting', 'sports_understanding',
    'boolean_expressions', 'object_counting', 'formal_fallacies',
    'causal_judgement', 'web_of_lies'
]
_bbh = ['bbh-' + s for s in _bbh]
bbh_summary_groups.append({'name': 'bbh', 'subsets': _bbh})
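
Note: these config fragments are not run on their own; a top-level evaluation config aggregates the `*_datasets` lists and a `models` list. The sketch below is illustrative only: the file name configs/eval_demo.py and the particular dataset selection are assumptions, but the read_base aggregation pattern follows the config files added in this commit.

# configs/eval_demo.py -- hypothetical top-level config, shown for illustration.
from mmengine.config import read_base

with read_base():
    # Each dataset config in this commit defines a `*_datasets` list.
    from .datasets.piqa.piqa_ppl_788dbe import piqa_datasets
    from .datasets.race.race_gen_12de48 import race_datasets

# Collect the selected dataset configs into a single evaluation plan; a
# `models` list (for example the one defined in
# configs/models/classic/tigerbot-7b-sft.py) would be supplied alongside
# when launching the run.
datasets = [*piqa_datasets, *race_datasets]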
docs/en/_templates/404.html  (new file, mode 100644)

{% extends "layout.html" %}

{% block body %}

<h1>Page Not Found</h1>
<p>
  The page you are looking for cannot be found.
</p>
<p>
  If you just switched documentation versions, the page you were on has likely been moved. You can look for it in
  the table of contents on the left, or go to <a href="{{ pathto(root_doc) }}">the homepage</a>.
</p>
<!-- <p>
  If you cannot find the documentation you want, please <a href="">open an issue</a> to tell us!
</p> -->

{% endblock %}