OpenDAS / opencompass · Commits

Unverified commit bf79ff1c, authored Aug 11, 2023 by Tong Gao, committed by GitHub on Aug 11, 2023
[Feature] Add LEval datasets
Co-authored-by: kennymckormick <dhd@pku.edu.cn>
Parent: 8d9cee06
Changes: 76 changed files in the full commit; this page shows 20 changed files with 403 additions and 1 deletion (+403 -1).
configs/datasets/LEvalPaperAssistant/LEval_paper_assistant_gen.py  +4 -0
configs/datasets/LEvalPaperAssistant/LEval_paper_assistant_gen_6c03d0.py  +40 -0
configs/datasets/LEvalPatentSumm/LEval_patent_summ_gen.py  +4 -0
configs/datasets/LEvalPatentSumm/LEval_patent_summ_gen_db3565.py  +40 -0
configs/datasets/LEvalQuality/LEval_quality_gen.py  +4 -0
configs/datasets/LEvalQuality/LEval_quality_gen_bd35f4.py  +42 -0
configs/datasets/LEvalReviewSumm/LEval_review_summ_gen.py  +4 -0
configs/datasets/LEvalReviewSumm/LEval_review_summ_gen_6c03d0.py  +40 -0
configs/datasets/LEvalScientificQA/LEval_scientificqa_gen.py  +4 -0
configs/datasets/LEvalScientificQA/LEval_scientificqa_gen_0c6e71.py  +40 -0
configs/datasets/LEvalTPO/LEval_tpo_gen.py  +4 -0
configs/datasets/LEvalTPO/LEval_tpo_gen_bd35f4.py  +42 -0
configs/datasets/LEvalTVShowSumm/.ipynb_checkpoints/LEval_tvshow_summ_gen-checkpoint.py  +4 -0
configs/datasets/LEvalTVShowSumm/.ipynb_checkpoints/LEval_tvshow_summ_gen_rouge-checkpoint.py  +40 -0
configs/datasets/LEvalTVShowSumm/LEval_tvshow_summ_gen.py  +4 -0
configs/datasets/LEvalTVShowSumm/LEval_tvshow_summ_gen_049a5c.py  +40 -0
configs/datasets/LEvalTopicRetrieval/LEval_topic_retrieval_gen.py  +4 -0
configs/datasets/LEvalTopicRetrieval/LEval_topic_retrieval_gen_af0562.py  +42 -0
configs/datasets/agieval/agieval_gen.py  +1 -1
configs/datasets/agieval/agieval_gen_64afd3.py  +0 -0
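All of the new dataset configs follow the same OpenCompass pattern: a reader config naming the input and output columns, a zero-shot generation inferencer with a prompt template, and a task-appropriate evaluator. To actually run them, a top-level evaluation config imports one or more of the exported *_datasets lists and pairs them with a model config. The sketch below is illustrative only and is not part of this commit; the file name and the model import path are assumptions.

# configs/eval_leval_demo.py -- hypothetical top-level config (not part of this commit).
from mmengine.config import read_base

with read_base():
    # dataset lists added by this commit
    from .datasets.LEvalQuality.LEval_quality_gen import LEval_quality_datasets
    from .datasets.LEvalTPO.LEval_tpo_gen import LEval_tpo_datasets
    # model config path is an assumption; substitute any config under configs/models/
    from .models.hf_llama_7b import models

# OpenCompass collects the `datasets` and `models` lists from the top-level config.
datasets = [*LEval_quality_datasets, *LEval_tpo_datasets]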
configs/datasets/LEvalPaperAssistant/LEval_paper_assistant_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_paper_assistant_gen_6c03d0 import LEval_ps_summ_datasets  # noqa: F401, F403
configs/datasets/LEvalPaperAssistant/LEval_paper_assistant_gen_6c03d0.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
from opencompass.datasets import LEvalPaperAssistantDataset

LEval_ps_summ_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_ps_summ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}\nAnswer:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

LEval_ps_summ_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_role='BOT')

LEval_ps_summ_datasets = [
    dict(
        type=LEvalPaperAssistantDataset,
        abbr='LEval_paper_assistant',
        path='L4NLP/LEval',
        name='paper_assistant',
        reader_cfg=LEval_ps_summ_reader_cfg,
        infer_cfg=LEval_ps_summ_infer_cfg,
        eval_cfg=LEval_ps_summ_eval_cfg)
]
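In the config above, ZeroRetriever supplies no in-context examples, so the HUMAN turn is filled directly from each row's context and question columns; the empty BOT turn is where the generated summary (up to 512 tokens) goes before being scored with ROUGE against the answer column. A rough illustration of how that prompt string is rendered for one row (the row contents here are invented):

# Illustrative only: rendering the HUMAN prompt for one invented dataset row.
row = {
    'context': 'We study long-context evaluation of language models ...',  # truncated paper text
    'question': 'Please write the abstract section of this paper.',
    'answer': 'This paper introduces ...',  # reference summary used by ROUGE
}
human_prompt = '{context}\nQuestion: {question}\nAnswer:'.format(
    context=row['context'], question=row['question'])
print(human_prompt)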
configs/datasets/LEvalPatentSumm/LEval_patent_summ_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_patent_summ_gen_db3565 import LEval_patent_summ_datasets  # noqa: F401, F403
configs/datasets/LEvalPatentSumm/LEval_patent_summ_gen_db3565.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
from opencompass.datasets import LEvalPatentSummDataset

LEval_patent_summ_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_patent_summ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\n{question}\nTL;DR:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

LEval_patent_summ_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_role='BOT')

LEval_patent_summ_datasets = [
    dict(
        type=LEvalPatentSummDataset,
        abbr='LEval_patent_summ',
        path='L4NLP/LEval',
        name='patent_summ',
        reader_cfg=LEval_patent_summ_reader_cfg,
        infer_cfg=LEval_patent_summ_infer_cfg,
        eval_cfg=LEval_patent_summ_eval_cfg)
]
configs/datasets/LEvalQuality/LEval_quality_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_quality_gen_bd35f4 import LEval_quality_datasets  # noqa: F401, F403
configs/datasets/LEvalQuality/LEval_quality_gen_bd35f4.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator, AccEvaluator
from opencompass.datasets import LEvalQualityDataset
from opencompass.utils.text_postprocessors import first_capital_postprocess, first_capital_postprocess_multi

LEval_quality_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_quality_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}\nAnswer:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=10))

LEval_quality_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_postprocessor=dict(type=first_capital_postprocess),
    pred_role='BOT')

LEval_quality_datasets = [
    dict(
        type=LEvalQualityDataset,
        abbr='LEval_quality',
        path='L4NLP/LEval',
        name='quality',
        reader_cfg=LEval_quality_reader_cfg,
        infer_cfg=LEval_quality_infer_cfg,
        eval_cfg=LEval_quality_eval_cfg)
]
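The quality subset is multiple choice, so generation is capped at 10 tokens and scored with AccEvaluator after the prediction passes through first_capital_postprocess, which boils a free-form answer down to its first capital letter for comparison with the gold option label. The snippet below is an illustrative re-implementation of that idea, not the OpenCompass source:

# Illustrative sketch of a "first capital letter" postprocessor (assumption, not the library code).
def first_capital_letter(text: str) -> str:
    """Return the first uppercase A-Z character in the model output, or '' if none."""
    for ch in text:
        if 'A' <= ch <= 'Z':
            return ch
    return ''

print(first_capital_letter('  A) The butler did it.'))  # -> 'A'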
configs/datasets/LEvalReviewSumm/LEval_review_summ_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_review_summ_gen_6c03d0 import LEval_review_summ_datasets  # noqa: F401, F403
configs/datasets/LEvalReviewSumm/LEval_review_summ_gen_6c03d0.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
from opencompass.datasets import LEvalReviewSummDataset

LEval_review_summ_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_review_summ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}\nAnswer:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

LEval_review_summ_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_role='BOT')

LEval_review_summ_datasets = [
    dict(
        type=LEvalReviewSummDataset,
        abbr='LEval_review_summ',
        path='L4NLP/LEval',
        name='review_summ',
        reader_cfg=LEval_review_summ_reader_cfg,
        infer_cfg=LEval_review_summ_infer_cfg,
        eval_cfg=LEval_review_summ_eval_cfg)
]
configs/datasets/LEvalScientificQA/LEval_scientificqa_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_scientificqa_gen_0c6e71 import LEval_scientificqa_datasets  # noqa: F401, F403
configs/datasets/LEvalScientificQA/LEval_scientificqa_gen_0c6e71.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator
from opencompass.datasets import LEvalScientificQADataset

LEval_scientificqa_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_scientificqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}\nAnswer:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64))

LEval_scientificqa_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_role='BOT')

LEval_scientificqa_datasets = [
    dict(
        type=LEvalScientificQADataset,
        abbr='LEval_scientificqa',
        path='L4NLP/LEval',
        name='scientific_qa',
        reader_cfg=LEval_scientificqa_reader_cfg,
        infer_cfg=LEval_scientificqa_infer_cfg,
        eval_cfg=LEval_scientificqa_eval_cfg)
]
configs/datasets/LEvalTPO/LEval_tpo_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_tpo_gen_bd35f4 import LEval_tpo_datasets
configs/datasets/LEvalTPO/LEval_tpo_gen_bd35f4.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator, AccEvaluator
from opencompass.datasets import LEvalTPODataset
from opencompass.utils.text_postprocessors import first_capital_postprocess, first_capital_postprocess_multi

LEval_tpo_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_tpo_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}\nAnswer:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=10))

LEval_tpo_eval_cfg = dict(
    evaluator=dict(type=AccEvaluator),
    pred_postprocessor=dict(type=first_capital_postprocess),
    pred_role='BOT')

LEval_tpo_datasets = [
    dict(
        type=LEvalTPODataset,
        abbr='LEval_tpo',
        path='L4NLP/LEval',
        name='tpo',
        reader_cfg=LEval_tpo_reader_cfg,
        infer_cfg=LEval_tpo_infer_cfg,
        eval_cfg=LEval_tpo_eval_cfg)
]
configs/datasets/LEvalTVShowSumm/.ipynb_checkpoints/LEval_tvshow_summ_gen-checkpoint.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_tvshow_summ_gen_rouge import LEval_tvshow_summ_datasets  # noqa: F401, F403
configs/datasets/LEvalTVShowSumm/.ipynb_checkpoints/LEval_tvshow_summ_gen_rouge-checkpoint.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
from opencompass.datasets import LEvalTVShowSummDataset

LEval_tvshow_summ_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_tvshow_summ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}'),
            dict(role='BOT', prompt='TL;DR:'),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

LEval_tvshow_summ_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_role='BOT')

LEval_tvshow_summ_datasets = [
    dict(
        type=LEvalTVShowSummDataset,
        abbr='LEval_tvshow_summ',
        path='L4NLP/LEval',
        name='tv_show_summ',
        reader_cfg=LEval_tvshow_summ_reader_cfg,
        infer_cfg=LEval_tvshow_summ_infer_cfg,
        eval_cfg=LEval_tvshow_summ_eval_cfg)
]
configs/datasets/LEvalTVShowSumm/LEval_tvshow_summ_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_tvshow_summ_gen_049a5c import LEval_tvshow_summ_datasets  # noqa: F401, F403
configs/datasets/LEvalTVShowSumm/LEval_tvshow_summ_gen_049a5c.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator
from opencompass.datasets import LEvalTVShowSummDataset

LEval_tvshow_summ_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_tvshow_summ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}\nTL;DR:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

LEval_tvshow_summ_eval_cfg = dict(
    evaluator=dict(type=RougeEvaluator),
    pred_role='BOT')

LEval_tvshow_summ_datasets = [
    dict(
        type=LEvalTVShowSummDataset,
        abbr='LEval_tvshow_summ',
        path='L4NLP/LEval',
        name='tv_show_summ',
        reader_cfg=LEval_tvshow_summ_reader_cfg,
        infer_cfg=LEval_tvshow_summ_infer_cfg,
        eval_cfg=LEval_tvshow_summ_eval_cfg)
]
configs/datasets/LEvalTopicRetrieval/LEval_topic_retrieval_gen.py  (new file, 0 → 100644)

from mmengine.config import read_base

with read_base():
    from .LEval_topic_retrieval_gen_af0562 import LEval_tr_datasets
configs/datasets/LEvalTopicRetrieval/LEval_topic_retrieval_gen_af0562.py  (new file, 0 → 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import EMEvaluator, RougeEvaluator, SquadEvaluator, AccEvaluator
from opencompass.datasets import LEvalTopicRetrievalDataset
from opencompass.utils.text_postprocessors import first_capital_postprocess, first_capital_postprocess_multi, general_postprocess

LEval_tr_reader_cfg = dict(
    input_columns=['context', 'question'],
    output_column='answer',
    train_split='test',
    test_split='test')

LEval_tr_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(role='HUMAN', prompt='{context}\nQuestion: {question}\nAnswer:'),
            dict(role='BOT', prompt=''),
        ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=30))

LEval_tr_eval_cfg = dict(
    evaluator=dict(type=EMEvaluator),
    pred_postprocessor=dict(type=general_postprocess),
    pred_role='BOT')

LEval_tr_datasets = [
    dict(
        type=LEvalTopicRetrievalDataset,
        abbr='LEval_topic_retrieval',
        path='L4NLP/LEval',
        name='topic_retrieval_longchat',
        reader_cfg=LEval_tr_reader_cfg,
        infer_cfg=LEval_tr_infer_cfg,
        eval_cfg=LEval_tr_eval_cfg)
]
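Topic retrieval is scored with exact match (EMEvaluator) after a general postprocessing step, so surface differences in formatting (punctuation, articles, stray whitespace, and the like) do not turn a correct answer into a miss. The normalization below only illustrates that idea with SQuAD-style conventions; it is an assumption, not the general_postprocess implementation:

import re
import string

def normalize(text: str) -> str:
    """SQuAD-style normalization (illustrative): lowercase, drop punctuation/articles, squeeze spaces."""
    text = text.lower()
    text = ''.join(ch for ch in text if ch not in string.punctuation)
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    return ' '.join(text.split())

def exact_match(pred: str, gold: str) -> bool:
    return normalize(pred) == normalize(gold)

print(exact_match('The Wizard of Oz.', 'the wizard of oz'))         # True after normalization
print(exact_match('I think it was about Oz.', 'the wizard of oz'))  # False: extra words remain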
configs/datasets/agieval/agieval_gen.py  (modified, +1 -1)

from mmengine.config import read_base

with read_base():
-    from .agieval_gen_397d81 import agieval_datasets  # noqa: F401, F403
+    from .agieval_gen_64afd3 import agieval_datasets  # noqa: F401, F403
configs/datasets/agieval/agieval_gen_397d81.py → configs/datasets/agieval/agieval_gen_64afd3.py  (file moved, contents unchanged)