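# --- CLUE OCNLI, PPL-style config (plain-string label templates) ---
# Each candidate label maps to a complete prompt string; PPLInferencer scores
# every candidate and picks the label whose prompt has the lowest perplexity.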
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
ocnli_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'], output_column='label')
# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'contradiction':
'阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:错',
'entailment': '阅读文章:{sentence1}\n根据上文,回答如下问题: {sentence2}?\n答:对',
'neutral': '如果{sentence1}为真,那么{sentence2}也为真吗?可能'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
ocnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
ocnli_datasets = [
dict(
type=HFDataset,
abbr='ocnli',
path='json',
split='train',
data_files='./data/CLUE/OCNLI/dev.json',
reader_cfg=ocnli_reader_cfg,
infer_cfg=ocnli_infer_cfg,
eval_cfg=ocnli_eval_cfg)
]
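# --- CLUE OCNLI, PPL-style config (chat-format variant) ---
# The same three label prompts expressed as HUMAN/BOT rounds for models that
# expect dialogue-formatted input.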
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
ocnli_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'], output_column='label')
# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'contradiction':
dict(round=[
dict(
role='HUMAN',
prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
dict(role='BOT', prompt='错')
]),
'entailment':
dict(round=[
dict(
role='HUMAN',
prompt='阅读文章:{sentence1}\n根据上文,回答如下问题:{sentence2}?'),
dict(role='BOT', prompt='对')
]),
'neutral':
dict(round=[
dict(
role='HUMAN', prompt='如果{sentence1}为真,那么{sentence2}也为真吗?'),
dict(role='BOT', prompt='可能')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
ocnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
ocnli_datasets = [
dict(
type=HFDataset,
abbr='ocnli',
path='json',
split='train',
data_files='./data/CLUE/OCNLI/dev.json',
reader_cfg=ocnli_reader_cfg,
infer_cfg=ocnli_infer_cfg,
eval_cfg=ocnli_eval_cfg)
]
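# --- CLUE OCNLI, PPL-style config (relation-question variant) ---
# A single shared HUMAN question about the relation between the two sentences;
# the label word (矛盾/蕴含/无关) is the BOT completion being scored.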
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
ocnli_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'], output_column='label')
# TODO: two prompt templates for ocnli
ocnli_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
'contradiction':
dict(round=[
dict(
role='HUMAN',
prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
),
dict(role='BOT', prompt='矛盾')
]),
'entailment':
dict(round=[
dict(
role='HUMAN',
prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
),
dict(role='BOT', prompt='蕴含')
]),
'neutral':
dict(round=[
dict(
role='HUMAN',
prompt='语句一:“{sentence1}”\n语句二:“{sentence2}”\n请问这两句话是什么关系?'
),
dict(role='BOT', prompt='无关')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
ocnli_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
ocnli_datasets = [
dict(
type=HFDataset,
abbr='ocnli',
path='json',
split='train',
data_files='./data/CLUE/OCNLI/dev.json',
reader_cfg=ocnli_reader_cfg,
infer_cfg=ocnli_infer_cfg,
eval_cfg=ocnli_eval_cfg)
]
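# --- ChemBench, generative 5-shot config ---
# FixKRetriever prepends demonstrations 0-4 from the dev split at the </E>
# token, GenInferencer generates the answer, and first_capital_postprocess
# extracts the first capital letter (A-D) for accuracy scoring.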
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import ChemBenchDataset
from opencompass.utils.text_postprocessors import first_capital_postprocess
chembench_reader_cfg = dict(
input_columns=['input', 'A', 'B', 'C', 'D'],
output_column='target',
train_split='dev')
chembench_all_sets = [
'Name_Conversion',
'Property_Prediction',
'Mol2caption',
'Caption2mol',
'Product_Prediction',
'Retrosynthesis',
'Yield_Prediction',
'Temperature_Prediction',
'Solvent_Prediction'
]
chembench_datasets = []
for _name in chembench_all_sets:
# _hint = f'There is a single choice question about {_name.replace("_", " ")}. Answer the question by replying A, B, C or D.'
_hint = 'There is a single choice question about chemistry. Answer the question by replying A, B, C or D.'
chembench_infer_cfg = dict(
ice_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
),
dict(role='BOT', prompt='{target}\n')
]),
),
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin='</E>',
round=[
dict(
role='HUMAN',
prompt=
f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
),
],
),
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
inferencer=dict(type=GenInferencer),
)
chembench_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_postprocessor=dict(type=first_capital_postprocess))
chembench_datasets.append(
dict(
abbr=f'ChemBench_{_name}',
type=ChemBenchDataset,
path='opencompass/ChemBench',
name=_name,
reader_cfg=chembench_reader_cfg,
infer_cfg=chembench_infer_cfg,
eval_cfg=chembench_eval_cfg,
))
del _name, _hint
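# --- FewCLUE BUSTM, generative entry point ---
# Re-exports bustm_datasets from a concrete config via mmengine's read_base.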
from mmengine.config import read_base
with read_base():
from .FewCLUE_bustm_gen_634f41 import bustm_datasets # noqa: F401, F403
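# --- FewCLUE BUSTM, generative config ---
# The model answers a binary A/B choice on whether the two sentences mean the
# same thing; accuracy is computed on the extracted first capital letter.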
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AFQMCDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess
bustm_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
bustm_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
'语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?\nA. 无关\nB. 相关\n请从“A”,“B”中进行选择。\n答:',
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
bustm_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
bustm_datasets = [
dict(
abbr='bustm-dev',
type=AFQMCDatasetV2, # bustm shares the same format as AFQMC
path='./data/FewCLUE/bustm/dev_few_all.json',
local_mode=True,
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg,
),
dict(
abbr='bustm-test',
type=AFQMCDatasetV2, # bustm shares the same format as AFQMC
path='./data/FewCLUE/bustm/test_public.json',
local_mode=True,
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg,
),
]
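# --- FewCLUE BUSTM, PPL entry point ---
# Re-exports bustm_datasets from a concrete PPL config via read_base.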
from mmengine.config import read_base
with read_base():
from .FewCLUE_bustm_ppl_e53034 import bustm_datasets # noqa: F401, F403
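# --- FewCLUE BUSTM, PPL config (chat variant with a SYSTEM preamble) ---
# Labels 0/1 map to the '毫不相关' / '是一个意思' BOT completions.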
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
bustm_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
bustm_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt='请判断以下两句话说的是否是一个意思:')
],
round=[
dict(role='HUMAN', prompt='{sentence1},{sentence2}'),
dict(role='BOT', prompt='两句话说的毫不相关。')
]),
1:
dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt='请判断以下两句话说的是否是一个意思:')
],
round=[
dict(role='HUMAN', prompt='{sentence1},{sentence2}'),
dict(role='BOT', prompt='两句话说的是一个意思。')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
bustm_datasets = [
dict(
type=HFDataset,
abbr='bustm-dev',
path='json',
data_files='./data/FewCLUE/bustm/dev_few_all.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg),
dict(
type=HFDataset,
abbr='bustm-test',
path='json',
data_files='./data/FewCLUE/bustm/test_public.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg)
]
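# --- FewCLUE BUSTM, PPL config (plain-string variant of the same two label prompts) ---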
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
bustm_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
bustm_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0: '{sentence1}。\n{sentence2}。\n两句话说的毫不相关。',
1: '{sentence1}。\n{sentence2}。\n两句话说的是一个意思。'
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
bustm_datasets = [
dict(
type=HFDataset,
abbr='bustm-dev',
path='json',
data_files='./data/FewCLUE/bustm/dev_few_all.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg),
dict(
type=HFDataset,
abbr='bustm-test',
path='json',
data_files='./data/FewCLUE/bustm/test_public.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg)
]
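# --- FewCLUE BUSTM, PPL config (chat variant without the SYSTEM preamble) ---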
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HFDataset
bustm_reader_cfg = dict(
input_columns=['sentence1', 'sentence2'],
output_column='label',
test_split='train')
bustm_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[
dict(
role='HUMAN',
prompt=
'语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?'
),
dict(role='BOT', prompt='两句话说的毫不相关。')
]),
1:
dict(round=[
dict(
role='HUMAN',
prompt=
'语句一:“{sentence1}”\n语句二:“{sentence2}”\n请判断语句一和语句二说的是否是一个意思?'
),
dict(role='BOT', prompt='两句话说的是一个意思。')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
bustm_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
bustm_datasets = [
dict(
type=HFDataset,
abbr='bustm-dev',
path='json',
data_files='./data/FewCLUE/bustm/dev_few_all.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg),
dict(
type=HFDataset,
abbr='bustm-test',
path='json',
data_files='./data/FewCLUE/bustm/test_public.json',
split='train',
reader_cfg=bustm_reader_cfg,
infer_cfg=bustm_infer_cfg,
eval_cfg=bustm_eval_cfg)
]
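# --- FewCLUE CHID, generative entry point ---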
from mmengine.config import read_base
with read_base():
from .FewCLUE_chid_gen_0a29a2 import chid_datasets # noqa: F401, F403
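# --- FewCLUE CHID, generative config ---
# The model picks which of seven candidate idioms (A-G) fills the blank in
# the passage.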
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CHIDDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess
chid_reader_cfg = dict(
input_columns=['content', 'A', 'B', 'C', 'D', 'E', 'F', 'G'],
output_column='answer',
)
chid_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=
'{content}\n请选择______处所填的词\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nF. {F}\nG. {G}\n请从“A”,“B”,“C”,“D”,“E”,“F”,“G”中进行选择。答:',
),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
chid_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
chid_datasets = [
dict(
abbr='chid-dev',
type=CHIDDatasetV2,
path='./data/FewCLUE/chid/dev_few_all.json',
reader_cfg=chid_reader_cfg,
infer_cfg=chid_infer_cfg,
eval_cfg=chid_eval_cfg,
),
dict(
abbr='chid-test',
type=CHIDDatasetV2,
path='./data/FewCLUE/chid/test_public.json',
reader_cfg=chid_reader_cfg,
infer_cfg=chid_infer_cfg,
eval_cfg=chid_eval_cfg,
),
]
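# --- FewCLUE CHID, PPL entry point ---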
from mmengine.config import read_base
with read_base():
from .FewCLUE_chid_ppl_8f2872 import chid_datasets # noqa: F401, F403
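# --- FewCLUE CHID, PPL config (chat variant) ---
# Candidates 0-6 are the seven filled-in sentences (content0..content6); the
# candidate the model rates most fluent (lowest perplexity) is the answer.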
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CHIDDataset
chid_reader_cfg = dict(
input_columns=[f'content{i}' for i in range(7)], output_column='answer')
chid_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
i: dict(
round=[
dict(role='HUMAN', prompt=f'以下句子是否通顺?\n{{content{i}}}'),
dict(role='BOT', prompt='这个句子是通顺的。'),
], )
for i in range(7)
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
chid_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
chid_datasets = [
dict(
type=CHIDDataset,
path='json',
abbr='chid-dev',
data_files='./data/FewCLUE/chid/dev_few_all.json',
split='train',
reader_cfg=chid_reader_cfg,
infer_cfg=chid_infer_cfg,
eval_cfg=chid_eval_cfg),
dict(
type=CHIDDataset,
path='json',
abbr='chid-test',
data_files='./data/FewCLUE/chid/test_public.json',
split='train',
reader_cfg=chid_reader_cfg,
infer_cfg=chid_infer_cfg,
eval_cfg=chid_eval_cfg),
]
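# --- FewCLUE CHID, PPL config (plain-string variant of the same seven candidates) ---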
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CHIDDataset
chid_reader_cfg = dict(
input_columns=[f'content{i}' for i in range(7)], output_column='answer')
chid_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={i: f'以下句子是否通顺?\n{{content{i}}}\n这个句子是通顺的。'
for i in range(7)}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
chid_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
chid_datasets = [
dict(
type=CHIDDataset,
path='json',
abbr='chid-dev',
data_files='./data/FewCLUE/chid/dev_few_all.json',
split='train',
reader_cfg=chid_reader_cfg,
infer_cfg=chid_infer_cfg,
eval_cfg=chid_eval_cfg),
dict(
type=CHIDDataset,
path='json',
abbr='chid-test',
data_files='./data/FewCLUE/chid/test_public.json',
split='train',
reader_cfg=chid_reader_cfg,
infer_cfg=chid_infer_cfg,
eval_cfg=chid_eval_cfg),
]
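# --- FewCLUE CLUEWSC, generative entry point ---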
from mmengine.config import read_base
with read_base():
from .FewCLUE_cluewsc_gen_c68933 import cluewsc_datasets # noqa: F401, F403
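# --- FewCLUE CLUEWSC, generative config ---
# Binary A/B choice on whether the pronoun (span2) refers to the noun phrase
# (span1).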
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDatasetV2
from opencompass.utils.text_postprocessors import first_capital_postprocess
cluewsc_reader_cfg = dict(
input_columns=['span1', 'span2', 'text', 'new_text'],
output_column='label',
)
cluewsc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
'{text}\n此处,“{span2}”是否指代“{span1}”?\nA. 是\nB. 否\n请从“A”,“B”中进行选择。\n答:',
),
]),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
cluewsc_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=first_capital_postprocess),
)
cluewsc_datasets = [
dict(
abbr='cluewsc-dev',
type=CluewscDatasetV2,
path='./data/FewCLUE/cluewsc/dev_few_all.json',
reader_cfg=cluewsc_reader_cfg,
infer_cfg=cluewsc_infer_cfg,
eval_cfg=cluewsc_eval_cfg,
),
dict(
abbr='cluewsc-test',
type=CluewscDatasetV2,
path='./data/FewCLUE/cluewsc/test_public.json',
reader_cfg=cluewsc_reader_cfg,
infer_cfg=cluewsc_infer_cfg,
eval_cfg=cluewsc_eval_cfg,
),
]
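# --- FewCLUE CLUEWSC, PPL entry point ---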
from mmengine.config import read_base
with read_base():
from .FewCLUE_cluewsc_ppl_868415 import cluewsc_datasets # noqa: F401, F403
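# --- FewCLUE CLUEWSC, PPL config (chat variant, English Yes/No completions) ---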
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDataset
cluewsc_reader_cfg = dict(
input_columns=['span1', 'span2', 'text', 'new_text'],
output_column='answer')
cluewsc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
dict(round=[
dict(
role='HUMAN',
prompt=
"{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"?"
),
dict(role='BOT', prompt='No.')
]),
1:
dict(round=[
dict(
role='HUMAN',
prompt=
"{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"?"
),
dict(role='BOT', prompt='Yes.')
]),
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
cluewsc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
cluewsc_datasets = [
dict(
type=CluewscDataset,
path='json',
abbr='cluewsc-dev',
data_files='./data/FewCLUE/cluewsc/dev_few_all.json',
split='train',
reader_cfg=cluewsc_reader_cfg,
infer_cfg=cluewsc_infer_cfg,
eval_cfg=cluewsc_eval_cfg),
dict(
type=CluewscDataset,
path='json',
abbr='cluewsc-test',
data_files='./data/FewCLUE/cluewsc/test_public.json',
split='train',
reader_cfg=cluewsc_reader_cfg,
infer_cfg=cluewsc_infer_cfg,
eval_cfg=cluewsc_eval_cfg),
]
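# --- FewCLUE CLUEWSC, PPL config (plain-string variant) ---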
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CluewscDataset
cluewsc_reader_cfg = dict(
input_columns=['span1', 'span2', 'text', 'new_text'],
output_column='answer')
cluewsc_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template={
0:
"{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"? No.",
1:
"{text}\nHere, is the pronoun \"{span2}\" used to mean \"{span1}\"? Yes.",
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer))
cluewsc_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
cluewsc_datasets = [
dict(
type=CluewscDataset,
path='json',
abbr='cluewsc-dev',
data_files='./data/FewCLUE/cluewsc/dev_few_all.json',
split='train',
reader_cfg=cluewsc_reader_cfg,
infer_cfg=cluewsc_infer_cfg,
eval_cfg=cluewsc_eval_cfg),
dict(
type=CluewscDataset,
path='json',
abbr='cluewsc-test',
data_files='./data/FewCLUE/cluewsc/test_public.json',
split='train',
reader_cfg=cluewsc_reader_cfg,
infer_cfg=cluewsc_infer_cfg,
eval_cfg=cluewsc_eval_cfg),
]
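# --- Usage sketch (illustrative, not part of this commit) ---
# A top-level OpenCompass config typically gathers these dataset lists and
# pairs them with model configs. The import paths below are hypothetical and
# depend on where the files above live in the repo.
from mmengine.config import read_base

with read_base():
    from .FewCLUE_bustm_gen import bustm_datasets  # hypothetical path
    from .FewCLUE_chid_gen import chid_datasets  # hypothetical path

# OpenCompass picks up the module-level `datasets` list when running an eval.
datasets = [*bustm_datasets, *chid_datasets]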