Unverified Commit 3cab9027 authored by Sylvain Gugger's avatar Sylvain Gugger Committed by GitHub
Browse files

Add examples telemetry (#17552)

* Add examples telemetry

* Alternative approach

* Add to all other examples

* Add to templates as well

* Put framework separately

* Same for TensorFlow
parent 9e72eb44
...@@ -43,7 +43,7 @@ from transformers import ( ...@@ -43,7 +43,7 @@ from transformers import (
) )
from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks. # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
...@@ -225,6 +225,10 @@ def main(): ...@@ -225,6 +225,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -51,7 +51,7 @@ from transformers import ( ...@@ -51,7 +51,7 @@ from transformers import (
default_data_collator, default_data_collator,
get_scheduler, get_scheduler,
) )
from transformers.utils import PaddingStrategy, get_full_repo_name from transformers.utils import PaddingStrategy, get_full_repo_name, send_example_telemetry
logger = get_logger(__name__) logger = get_logger(__name__)
...@@ -273,6 +273,10 @@ class DataCollatorForMultipleChoice: ...@@ -273,6 +273,10 @@ class DataCollatorForMultipleChoice:
def main(): def main():
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment # in the environment
......
...@@ -42,7 +42,7 @@ from transformers import ( ...@@ -42,7 +42,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
from utils_qa import postprocess_qa_predictions from utils_qa import postprocess_qa_predictions
...@@ -226,6 +226,10 @@ def main(): ...@@ -226,6 +226,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_qa", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -41,7 +41,7 @@ from transformers import ( ...@@ -41,7 +41,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
from utils_qa import postprocess_qa_predictions_with_beam_search from utils_qa import postprocess_qa_predictions_with_beam_search
...@@ -225,6 +225,10 @@ def main(): ...@@ -225,6 +225,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_qa_beam_search", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -49,7 +49,7 @@ from transformers import ( ...@@ -49,7 +49,7 @@ from transformers import (
default_data_collator, default_data_collator,
get_scheduler, get_scheduler,
) )
from transformers.utils import check_min_version, get_full_repo_name from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
from utils_qa import postprocess_qa_predictions_with_beam_search from utils_qa import postprocess_qa_predictions_with_beam_search
...@@ -291,6 +291,10 @@ def parse_args(): ...@@ -291,6 +291,10 @@ def parse_args():
def main(): def main():
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_qa_beam_search_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment
accelerator = Accelerator(log_with="all", logging_dir=args.output_dir) if args.with_tracking else Accelerator() accelerator = Accelerator(log_with="all", logging_dir=args.output_dir) if args.with_tracking else Accelerator()
......
...@@ -50,7 +50,7 @@ from transformers import ( ...@@ -50,7 +50,7 @@ from transformers import (
default_data_collator, default_data_collator,
get_scheduler, get_scheduler,
) )
from transformers.utils import check_min_version, get_full_repo_name from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
from utils_qa import postprocess_qa_predictions from utils_qa import postprocess_qa_predictions
...@@ -329,6 +329,10 @@ def parse_args(): ...@@ -329,6 +329,10 @@ def parse_args():
def main(): def main():
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_qa_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment # in the environment
......
...@@ -39,7 +39,7 @@ from transformers import ( ...@@ -39,7 +39,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -271,6 +271,10 @@ def main(): ...@@ -271,6 +271,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_seq2seq_qa", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -42,7 +42,7 @@ from transformers import ( ...@@ -42,7 +42,7 @@ from transformers import (
default_data_collator, default_data_collator,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -266,6 +266,10 @@ def main(): ...@@ -266,6 +266,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_semantic_segmentation", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -44,7 +44,7 @@ from transformers import ( ...@@ -44,7 +44,7 @@ from transformers import (
default_data_collator, default_data_collator,
get_scheduler, get_scheduler,
) )
from transformers.utils import get_full_repo_name from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -315,6 +315,10 @@ def parse_args(): ...@@ -315,6 +315,10 @@ def parse_args():
def main(): def main():
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_semantic_segmentation_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment # in the environment
......
...@@ -43,7 +43,7 @@ from transformers import ( ...@@ -43,7 +43,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices
from transformers.utils import get_full_repo_name from transformers.utils import get_full_repo_name, send_example_telemetry
logger = get_logger(__name__) logger = get_logger(__name__)
...@@ -363,6 +363,10 @@ def main(): ...@@ -363,6 +363,10 @@ def main():
# We now keep distinct sets of args, for a cleaner separation of concerns. # We now keep distinct sets of args, for a cleaner separation of concerns.
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_wav2vec2_pretraining_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator() accelerator = Accelerator()
logger.info(accelerator.state, main_process_only=False) logger.info(accelerator.state, main_process_only=False)
......
...@@ -44,7 +44,7 @@ from transformers import ( ...@@ -44,7 +44,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -376,6 +376,10 @@ def main(): ...@@ -376,6 +376,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_speech_recognition_ctc", model_args, data_args)
# Detecting last checkpoint. # Detecting last checkpoint.
last_checkpoint = None last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
......
...@@ -42,7 +42,7 @@ from transformers import ( ...@@ -42,7 +42,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -239,6 +239,10 @@ def main(): ...@@ -239,6 +239,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_speech_recognition_seq2seq", model_args, data_args)
# 2. Setup logging # 2. Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -46,7 +46,7 @@ from transformers import ( ...@@ -46,7 +46,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, is_offline_mode from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -302,6 +302,10 @@ def main(): ...@@ -302,6 +302,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_summarization", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -50,7 +50,7 @@ from transformers import ( ...@@ -50,7 +50,7 @@ from transformers import (
SchedulerType, SchedulerType,
get_scheduler, get_scheduler,
) )
from transformers.utils import get_full_repo_name, is_offline_mode from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -319,6 +319,10 @@ def parse_args(): ...@@ -319,6 +319,10 @@ def parse_args():
def main(): def main():
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_summarization_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment # in the environment
......
...@@ -42,7 +42,7 @@ from transformers import ( ...@@ -42,7 +42,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -215,6 +215,10 @@ def main(): ...@@ -215,6 +215,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_glue", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -42,7 +42,7 @@ from transformers import ( ...@@ -42,7 +42,7 @@ from transformers import (
default_data_collator, default_data_collator,
get_scheduler, get_scheduler,
) )
from transformers.utils import get_full_repo_name from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -205,6 +205,9 @@ def parse_args(): ...@@ -205,6 +205,9 @@ def parse_args():
def main(): def main():
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_glue_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
......
...@@ -42,7 +42,7 @@ from transformers import ( ...@@ -42,7 +42,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -112,8 +112,6 @@ class DataTrainingArguments: ...@@ -112,8 +112,6 @@ class DataTrainingArguments:
) )
}, },
) )
server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
@dataclass @dataclass
...@@ -176,14 +174,9 @@ def main(): ...@@ -176,14 +174,9 @@ def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup distant debugging if needed # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
if data_args.server_ip and data_args.server_port: # information sent is the one passed as arguments along with your Python/PyTorch versions.
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script send_example_telemetry("run_xnli", model_args)
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(data_args.server_ip, data_args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
......
...@@ -43,7 +43,7 @@ from transformers import ( ...@@ -43,7 +43,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -216,6 +216,10 @@ def main(): ...@@ -216,6 +216,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_ner", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
...@@ -49,7 +49,7 @@ from transformers import ( ...@@ -49,7 +49,7 @@ from transformers import (
default_data_collator, default_data_collator,
get_scheduler, get_scheduler,
) )
from transformers.utils import get_full_repo_name from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -259,6 +259,10 @@ def parse_args(): ...@@ -259,6 +259,10 @@ def parse_args():
def main(): def main():
args = parse_args() args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_ner_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment # in the environment
......
...@@ -46,7 +46,7 @@ from transformers import ( ...@@ -46,7 +46,7 @@ from transformers import (
set_seed, set_seed,
) )
from transformers.trainer_utils import get_last_checkpoint from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version from transformers.utils.versions import require_version
...@@ -260,6 +260,10 @@ def main(): ...@@ -260,6 +260,10 @@ def main():
else: else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_translation", model_args, data_args)
# Setup logging # Setup logging
logging.basicConfig( logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment