Unverified Commit 976d8a0b authored by Rima Shahbazyan, committed by GitHub

Adding new subtask to SCORE tasks: non greedy robustness (#2558)

* score readme added

* generate_until task's "until" parameter default value fixed.

* score mmlu-pro and agieval added

* changed macro accuracy to micro for agieval

* "Always E" removed from AGIEval

* redundancies removed

* MATH added

* minor cosmetic changes for math

* Licenses added, README updated

* changes for flake8 + license header on math

* Score added to readme and precommit was run.


* Import error fixed

* math task bugfix
postprocess minor fix

* CR for math added

* math CR

* math task bugfix
postprocess minor fix

CR for math added

* Math CR fixed

* mmlu_pro non_greedy task added

* non greedy summarizer added

* Non greedy for all score tasks

* Bugfixes for non-greedy

* fixing the until argument

* undoing the change to the "until" argument's default behaviour

* minor fix in summarizer

* log naming changes for better readability

* math subtasks naming fix

* agieval subtask naming fix

* logging added for debugging

* path issue fixed

* minor fix

* path fix

* path fix

* non_greedy_math minor fix

* final changes

* changed readme for non-greedy
added Nvidia header
added example script for non_greedy
changed prompts to match those of the TRT runs

* non greedy summarizer bugfix

* non_greedy summarizer fixed
parent 8de772f9
{
    "non_greedy_robustness": {
        "prompt": "Calculate the answer to this math problem\nProblem: {question}\nConclude your answer with:\nThe final answer is: $\\boxed{{answer}}$\nwhere [answer] is just the final number or expression that solves the problem."
    },
    "prompt_robustness": [
        {
            "prompt": "Efficiently solve the following math challenge. Explain your approach step-by-step\nThe answer should end with: The final answer is: $\\boxed{{answer}}$\nwhere [answer] is just the final number or expression that solves the problem\nProblem: {question}\nLets think step by step"
...
# Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
group: score_non_greedy_robustness_math
task:
- non_greedy_robustness_math_algebra
- non_greedy_robustness_math_counting_and_prob
- non_greedy_robustness_math_geometry
- non_greedy_robustness_math_intermediate_algebra
- non_greedy_robustness_math_num_theory
- non_greedy_robustness_math_prealgebra
- non_greedy_robustness_math_precalc
aggregate_metric_list:
  - metric: non_greedy_accuracy
    aggregation: mean
    weight_by_size: true
metadata:
  version: 1.0
@@ -15,5 +15,6 @@
group: score_robustness_math
task:
- score_prompt_robustness_math
- score_non_greedy_robustness_math
metadata:
  version: 1.0
@@ -34,6 +34,7 @@ from lm_eval.utils import eval_logger

TEMPLATE_FILE_PATH = os.path.join(os.path.dirname(__file__), "prompt_templates.json")
PROMPT_ROBUSTNESS_TEMPLATE_KEY = "prompt_robustness"
NON_GREEDY_ROBUSTNESS_TEMPLATE_KEY = "non_greedy_robustness"

math_robustness_doc_to_text = robustness_doc_to_text

@@ -141,8 +142,17 @@ def prompt_robustness_process_docs(doc: datasets.Dataset) -> datasets.Dataset:
    doc = process_docs(doc)
    return utils.process_docs_add_prompts(
        doc,
        templates_key=PROMPT_ROBUSTNESS_TEMPLATE_KEY,
        template_file_path=TEMPLATE_FILE_PATH,
    )


def non_greedy_robustness_process_docs(doc: datasets.Dataset) -> datasets.Dataset:
    doc = process_docs(doc)
    return utils.non_greedy_robustness_process_docs(
        doc,
        templates_key=NON_GREEDY_ROBUSTNESS_TEMPLATE_KEY,
        template_file_path=TEMPLATE_FILE_PATH,
    )

@@ -163,6 +173,13 @@ def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
    return results


def non_greedy_robustness_process_results(
    doc: dict, results: List[str]
) -> Dict[str, int]:
    answer = extract_answer(results[0])
    return {"non_greedy_accuracy": (doc["question_id"], answer, doc["answer"], None)}


def per_prompt_accuracy(results: List[Dict[str, Any]], p_id=0) -> float:
    accuracies = []
    for result in results:
@@ -233,3 +250,19 @@ def math_prompt_consistency_rate(results: List[Dict[str, Any]]) -> float:
    question_answers_list = [answers for answers in question_answers_dict.values()]
    return calculate_consistency_rate(question_answers_list)


def non_greedy_accuracy(results: List[Dict[str, Any]]) -> float:
    accuracies = []
    for result in results:
        question_id, final_answer, gt, _ = result
        if math_equal(final_answer, gt):
            retval = 1
        else:
            retval = 0
        accuracies.append(retval)

    accuracy = sum(accuracies) / len(accuracies)
    eval_logger.info(f"Non greedy accuracy: {accuracy}")
    return np.round(accuracy, 4)
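# Note (illustrative comment, not in the original file): each result tuple consumed here
# is the value emitted by non_greedy_robustness_process_results above, i.e.
# (question_id, extracted_answer, ground_truth, None), so the reported non-greedy
# accuracy is the fraction of questions where math_equal(extracted_answer, ground_truth) holds.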
{
    "option_order_robustness": {
        "prompt": "For the multiple-choice question related to {category}, which option (A-J) is correct?.\n\nQuestion:{question}{options}\nEnd the answer with the following:\nThe best answer is (the_answer_letter) where the (the_answer_letter) is one of 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I' or 'J'.",
        "options_format": "\n{letter}: {option}"
    },
    "non_greedy_robustness": {
        "prompt": "For the multiple-choice question related to {category}, which option (A-J) is correct?.\n\nQuestion:{question}{options}\nEnd the answer with the following:\nThe best answer is (the_answer_letter) where the (the_answer_letter) is one of 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I' or 'J'.",
        "options_format": "\n{letter}: {option}"
    },
...
# Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
tag: score_robustness_mmlu_pro
task: score_non_greedy_robustness_mmlu_pro
dataset_path: TIGER-Lab/MMLU-Pro
dataset_name: default
output_type: generate_until
validation_split: validation
test_split: test
process_docs: !function utils_mmlu_pro.non_greedy_robustness_process_docs
doc_to_text: !function utils_mmlu_pro.mmlu_pro_robustness_doc_to_text
doc_to_target: answer
generation_kwargs:
  until: []
  max_gen_toks: 1024
  do_sample: true
  temperature: 0.7
process_results: !function utils_mmlu_pro.non_greedy_robustness_process_results
metric_list:
  - metric: non_greedy_macro_accuracy
    aggregation: !function utils_mmlu_pro.non_greedy_macro_accuracy
    higher_is_better: true
metadata:
  version: 1.0
dataset_kwargs:
  trust_remote_code: true
@@ -27,6 +27,7 @@ TEMPLATE_FILE_PATH = os.path.join(os.path.dirname(__file__), "prompt_templates.j

PROMPT_ROBUSTNESS_TEMPLATE_KEY = "prompt_robustness"
OPTION_ORDER_ROBUSTNESS_TEMPLATE_KEY = "option_order_robustness"
NON_GREEDY_ROBUSTNESS_TEMPLATE_KEY = "non_greedy_robustness"

QUESTION_KEY = "question"

@@ -48,6 +49,23 @@ option_order_robustness_process_docs = partial(
    templates_key=OPTION_ORDER_ROBUSTNESS_TEMPLATE_KEY,
    labels=LABELS,
)

non_greedy_robustness_process_docs = partial(
    utils.non_greedy_robustness_process_docs,
    template_file_path=TEMPLATE_FILE_PATH,
    templates_key=NON_GREEDY_ROBUSTNESS_TEMPLATE_KEY,
)


def non_greedy_robustness_process_results(doc, results) -> Dict[str, float]:
    final_answer = utils.__postprocess_pred(results[0])
    final_answer = utils.translate_model_answer_to_labels(
        final_answer, option_format=doc["options_format"], labels=LABELS
    )
    question_id = doc["question_id"]
    category = doc["category"]
    gt = LABELS[doc["answer_index"]]
    return {"non_greedy_macro_accuracy": (question_id, final_answer, gt, category)}


def prompt_robustness_process_results(doc, results) -> Dict[str, float]:
@@ -162,3 +180,18 @@ per_option_macro_accuracy_i = partial(per_option_macro_accuracy, always_opt="I")
per_option_macro_accuracy_j = partial(per_option_macro_accuracy, always_opt="J")

options_consistency_rate = partial(utils.options_consistency_rate, labels=LABELS)


def non_greedy_macro_accuracy(results: List[Dict[str, Any]]) -> float:
    accuracies = {}
    for result in results:
        question_id, final_answer, gt, category = result
        if category not in accuracies:
            accuracies[category] = []
        accuracies[category].append(final_answer == gt)

    for key in accuracies:
        accuracies[key] = sum(accuracies[key]) / len(accuracies[key])
        eval_logger.info(f"Non greedy, category - {key} accuracy: {accuracies[key]}")

    return np.round(np.mean([v for v in accuracies.values()]), 4)
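# Illustrative sketch (not part of the original file): macro accuracy averages the
# per-category accuracies rather than pooling all questions. For example,
#   results = [(0, "A", "A", "law"), (1, "B", "C", "law"), (2, "D", "D", "math")]
# gives per-category accuracies {"law": 0.5, "math": 1.0} and a macro accuracy of
# round((0.5 + 1.0) / 2, 4) == 0.75.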
#!/bin/bash

helpFunction()
{
    echo ""
    echo "Usage: $0 -m MODEL -t TASK -s SEED -o OUTPUT_DIR"
    echo -e "\t-m huggingface model name"
    echo -e "\t-t task name, one of score_non_greedy_robustness_[agieval|mmlu_pro|math]"
    echo -e "\t-s random seed for evaluation [1-5]"
    echo -e "\t-o output directory"
    exit 1 # Exit script after printing help
}

while getopts "m:t:s:o:" opt
do
    case "$opt" in
        m ) MODEL="$OPTARG" ;;
        t ) TASK="$OPTARG" ;;
        s ) SEED="$OPTARG" ;;
        o ) OUTPUT_DIR="$OPTARG" ;;
        ? ) helpFunction ;; # Print helpFunction in case parameter is non-existent
    esac
done

if [ -z "$MODEL" ] || [ -z "$TASK" ] || [ -z "$SEED" ] || [ -z "$OUTPUT_DIR" ]
then
    echo "Some or all of the parameters are empty";
    helpFunction
fi

echo "evaluating $MODEL on task $TASK with seed $SEED"
echo "output will be saved in $OUTPUT_DIR"

TENSOR_PARALLEL=8
BATCH_SIZE="auto"

echo "running evaluation on vllm with tensor parallelism $TENSOR_PARALLEL"

lm_eval --model vllm \
    --model_args pretrained=$MODEL,dtype=bfloat16,tensor_parallel_size=$TENSOR_PARALLEL,gpu_memory_utilization=0.9,max_model_len=4096,data_parallel_size=1,disable_custom_all_reduce=True,enforce_eager=False,seed=$SEED \
    --apply_chat_template \
    --tasks $TASK \
    --batch_size $BATCH_SIZE \
    --log_samples \
    --output_path $OUTPUT_DIR
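# Example invocation (hypothetical script name, model, and paths), assuming each of the
# five seeds is evaluated into its own seed_<n> directory before running the summarizer:
#   bash non_greedy_eval.sh -m meta-llama/Meta-Llama-3-8B-Instruct \
#       -t score_non_greedy_robustness_mmlu_pro -s 3 -o ./score_logs/seed_3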
# Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import os
from datetime import datetime
from itertools import combinations
from pathlib import Path
from typing import List
import pandas as pd
from lm_eval.tasks.score.math.math_grader import math_equal
from lm_eval.utils import handle_non_serializable, make_table
N_SEEDS = 5
def load_json_logs(file_paths, subtasks):
    """
    Loads JSON logs of jsonl format from file paths into a single DataFrame.

    Args:
        file_paths: List of file paths to the JSON logs.

    Returns:
        A DataFrame containing the logs.
    """
    per_seed_df = {
        "question_id": [],
        "final_answer_seed_": [],
        "gt": [],
        "category": [],
    }
    _search_key = None
    for i in range(len(file_paths)):
        file_path = file_paths[i]
        with open(file_path, "r") as f:
            for line in f:
                datapoint = json.loads(line)
                if _search_key is None:
                    if "non_greedy_macro_accuracy" in datapoint:
                        _search_key = "non_greedy_macro_accuracy"
                    elif "non_greedy_accuracy" in datapoint:
                        _search_key = "non_greedy_accuracy"
                question_id, final_answer, gt, category = datapoint[_search_key]
                if subtasks is not None:
                    category = subtasks[i]
                per_seed_df["question_id"].append(question_id)
                per_seed_df["final_answer_seed_"].append(final_answer)
                per_seed_df["gt"].append(gt)
                per_seed_df["category"].append(category)
    df = pd.DataFrame(per_seed_df)
    return df
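# Note (illustrative comment, not in the original file): each line of a samples_*.jsonl
# log is expected to carry the tuple emitted by the task's process_results under the
# metric key, e.g. (values here are hypothetical):
#   {"non_greedy_macro_accuracy": ["q42", "B", "B", "history"], ...}
# which this loader unpacks as (question_id, final_answer, gt, category).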
def calculate_consistency_rate(responses: List[List[str]]) -> float:
    """
    Calculate the Consistency Rate (CR) for a given set of responses.

    Args:
        responses: List of lists, where each inner list contains responses to the same question.

    Returns:
        The consistency rate as a float.
    """
    total_similarity = 0
    total_combinations = 0
    for response_set in responses:
        pairs = combinations(response_set, 2)
        num_pairs = len(response_set) * (len(response_set) - 1) / 2
        total_combinations += num_pairs
        for answer1, answer2 in pairs:
            total_similarity += int(answer1 == answer2)

    return total_similarity / total_combinations if total_combinations > 0 else 0.0


def calculate_math_consistency_rate(responses: List[List[str]]) -> float:
    """
    Calculate the Consistency Rate (CR) for a given set of responses, comparing
    answers with math_equal instead of exact string equality.

    Args:
        responses: List of lists, where each inner list contains responses to the same question.

    Returns:
        The consistency rate as a float.
    """
    total_similarity = 0
    total_combinations = 0
    for response_set in responses:
        pairs = combinations(response_set, 2)
        num_pairs = len(response_set) * (len(response_set) - 1) / 2
        total_combinations += num_pairs
        for answer1, answer2 in pairs:
            total_similarity += int(math_equal(answer1, answer2))

    return total_similarity / total_combinations if total_combinations > 0 else 0.0
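# Worked example (added for illustration): with N_SEEDS = 5 answers per question, each
# question contributes C(5, 2) = 10 pairs. If 4 of the 5 answers agree, 6 of those 10
# pairs match, so:
#   calculate_consistency_rate([["A", "A", "A", "A", "B"]]) == 0.6
# Pairs are pooled across questions before dividing, e.g.
#   calculate_consistency_rate([["A", "A"], ["B", "C"]]) == 0.5  # 1 matching pair / 2 pairs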
def main():
    parser = argparse.ArgumentParser(
        description="Calculate consistency rate from JSON logs."
    )
    parser.add_argument(
        "--log_dir", help="Path to the directory containing the JSON log files."
    )
    parser.add_argument("--dataset", help="Dataset name: agieval, mmlu_pro or math")

    args = parser.parse_args()

    for seed in range(1, N_SEEDS + 1):
        # Checking if directories exist
        seed_log_dir = os.path.join(args.log_dir, f"seed_{seed}")
        assert os.path.exists(
            seed_log_dir
        ), f"No logs found for seed={seed}. No directory found at {seed_log_dir}"

        subtasks = None
        if args.dataset == "agieval":
            agieval_subtasks = [
                "aqua_rat",
                "logiqa_en",
                "lsat_ar",
                "lsat_lr",
                "lsat_rc",
                "sat_en",
                "sat_math",
            ]
            subtasks = agieval_subtasks
            file_paths = []
            for subtask in agieval_subtasks:
                log_path = os.path.join(
                    seed_log_dir,
                    f"*/samples_non_greedy_robustness_agieval_{subtask}_*.jsonl",
                )
                subtask_logs = glob.glob(log_path)
                if len(subtask_logs) == 0:
                    raise FileNotFoundError(
                        f"No logs found for agieval subtask {subtask} for seed={seed} in the path {log_path}."
                    )
                elif len(subtask_logs) > 1:
                    raise FileExistsError(
                        f"Multiple logs found for agieval subtask {subtask} for seed={seed}."
                    )
                file_paths.append(subtask_logs[0])
elif args.dataset == "mmlu_pro":
task_logs = glob.glob(
os.path.join(
seed_log_dir,
"*/samples_score_non_greedy_robustness_mmlu_pro_*.jsonl",
)
)
file_paths = []
if len(task_logs) == 0:
raise FileNotFoundError(
f"No logs found for mmlu_pro for seed={seed}. PATH: {seed_log_dir}"
)
elif len(task_logs) > 1:
raise FileExistsError(
f"Multiple logs found for mmlu_pro for seed={seed}."
)
file_paths.append(task_logs[0])
elif args.dataset == "math":
math_subtasks = [
"algebra",
"counting_and_prob",
"geometry",
"intermediate_algebra",
"num_theory",
"prealgebra",
"precalc",
]
subtasks = math_subtasks
file_paths = []
for subtask in math_subtasks:
log_path = os.path.join(
seed_log_dir,
f"*/samples_non_greedy_robustness_math_{subtask}_*.jsonl",
)
subtask_logs = glob.glob(log_path)
if len(subtask_logs) == 0:
raise FileNotFoundError(
f"No logs found for math subtask {subtask} for seed={seed} in the path {log_path}."
)
elif len(subtask_logs) > 1:
raise FileExistsError(
f"Multiple logs found for math subtask {subtask} for seed={seed}."
)
file_paths.append(subtask_logs[0])
else:
raise ValueError(
"Invalid dataset name. only agieval, mmlu_pro and math are supported."
)
        df = load_json_logs(file_paths, subtasks)
        # merge all dfs by question_id, category and gt
        if seed == 1:
            df_all = df
            df_all[f"final_answer_seed_{seed}"] = df["final_answer_seed_"]
        else:
            df_all = df_all.merge(
                df, on=["question_id", "category"], suffixes=("", seed)
            )

    responses = df_all[
        [f"final_answer_seed_{seed}" for seed in range(1, N_SEEDS + 1)]
    ].values.tolist()

    # calculate per seed accuracy
    if args.dataset == "math":
        consistency_rate = calculate_math_consistency_rate(responses)
        results = {"alias": f"score_non_greedy_robustness_{args.dataset}"}
        results.update(
            {
                "consistency_rate,none": consistency_rate,
                "consistency_rate_stderr,none": "N/A",
            }
        )
        for seed in range(1, N_SEEDS + 1):
            df_all[f"accuracy_seed_{seed}"] = df_all[
                [f"final_answer_seed_{seed}", "gt"]
            ].apply(lambda x: math_equal(*x), axis=1)
            accuracy = df_all[f"accuracy_seed_{seed}"].mean()
            results[f"seed_{seed}_accuracy,none"] = accuracy
            results[f"seed_{seed}_accuracy_stderr,none"] = "N/A"
    else:
        consistency_rate = calculate_consistency_rate(responses)
        results = {"alias": f"score_non_greedy_robustness_{args.dataset}"}
        results.update(
            {
                "consistency_rate,none": consistency_rate,
                "consistency_rate_stderr,none": "N/A",
            }
        )
        for seed in range(1, N_SEEDS + 1):
            df_all[f"accuracy_seed_{seed}"] = (
                df_all[f"final_answer_seed_{seed}"] == df_all["gt"]
            )
            accuracy = df_all[f"accuracy_seed_{seed}"].mean()
            results[f"seed_{seed}_accuracy,none"] = accuracy
            results[f"seed_{seed}_accuracy_stderr,none"] = "N/A"

    metrics = [f"seed_{seed}_accuracy" for seed in range(1, N_SEEDS + 1)] + [
        "consistency_rate"
    ]
    higher_is_better = {metric: True for metric in metrics}

    results_dict = {
        "results": {f"score_non_greedy_robustness_{args.dataset}": results},
        "group_subtasks": {f"score_non_greedy_robustness_{args.dataset}": []},
        "configs": None,
        "versions": {f"score_non_greedy_robustness_{args.dataset}": 1},
        "n-shot": {f"score_non_greedy_robustness_{args.dataset}": 0},
        "higher_is_better": {
            f"score_non_greedy_robustness_{args.dataset}": higher_is_better
        },
        "n-samples": None,
    }

    dumped = json.dumps(
        results_dict,
        indent=2,
        default=handle_non_serializable,
        ensure_ascii=False,
    )

    path = Path(args.log_dir)
    path.mkdir(parents=True, exist_ok=True)
    date_id = datetime.now().isoformat().replace(":", "-")
    file_results_aggregated = path.joinpath(f"{args.dataset}_results_{date_id}.json")
    file_results_aggregated.open("w", encoding="utf-8").write(dumped)
    print(make_table(results_dict))


if __name__ == "__main__":
    main()
@@ -130,6 +130,36 @@ def option_order_robustness_process_docs(
    return doc.map(repeat_doc_swap_correct_answer, batched=True)


def non_greedy_robustness_process_docs(
    doc: Dataset,
    templates_key: str,
    template_file_path: str,
    dataset_specific_preprocess: callable = None,
) -> Dataset:
    try:
        with open(template_file_path) as f:
            prompt_template = json.load(f)[templates_key]
            prompt = prompt_template["prompt"]
            options_format = prompt_template.get("options_format", None)
    except FileNotFoundError:
        eval_logger.error("Prompt templates not found")
        sys.exit()

    if dataset_specific_preprocess is not None:
        doc = dataset_specific_preprocess(doc)

    def add_prompt_col(batched_docs):
        initial_len = len(next(iter(batched_docs.values())))
        new_batched_docs = copy.deepcopy(batched_docs)
        new_batched_docs["prompt"] = [prompt] * initial_len
        if options_format is not None:
            new_batched_docs["options_format"] = [options_format] * initial_len
        return new_batched_docs

    return doc.map(add_prompt_col, batched=True)


def robustness_doc_to_text(doc: Dataset) -> str:
    upper_case = string.ascii_uppercase
    lower_case = string.ascii_lowercase
...