"...csrc/common/git@developer.sourcefind.cn:OpenDAS/mmcv.git" did not exist on "63a6cbe9134ea35a132da1c37f690d1c73b34d4d"
Commit 0731fa15 authored by Julien Plu, committed by Lysandre Debut

Apply quality and style requirements

parent a3998e76
@@ -29,10 +29,8 @@ from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config
from .configuration_mmbt import MMBTConfig
from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
@@ -57,7 +55,6 @@ from .data import (
    xnli_processors,
    xnli_tasks_num_labels,
)
# Files and general utilities
from .file_utils import (
    CONFIG_NAME,
@@ -74,10 +71,8 @@ from .file_utils import (
    is_tf_available,
    is_torch_available,
)
# Model Cards
from .modelcard import ModelCard
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (
    convert_tf_weight_name_to_pt_weight_name,
@@ -88,7 +83,6 @@ from .modeling_tf_pytorch_utils import (
    load_tf2_model_in_pytorch_model,
    load_tf2_weights_in_pytorch_model,
)
# Pipelines
from .pipelines import (
    CsvPipelineDataFormat,
@@ -114,7 +108,6 @@ from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
# Tokenizers
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_xlm import XLMTokenizer
...
@@ -22,12 +22,12 @@ import os
from transformers import (
    ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
@@ -35,17 +35,18 @@ from transformers import (
    XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AlbertConfig,
    BertConfig,
    CamembertConfig,
    CTRLConfig,
    DistilBertConfig,
    GPT2Config,
    OpenAIGPTConfig,
    RobertaConfig,
    CamembertConfig,
    T5Config,
    TFAlbertForMaskedLM,
    TFBertForPreTraining,
    TFBertForQuestionAnswering,
    TFBertForSequenceClassification,
    TFCamembertForMaskedLM,
    TFCTRLLMHeadModel,
    TFDistilBertForMaskedLM,
    TFDistilBertForQuestionAnswering,
@@ -53,8 +54,6 @@ from transformers import (
    TFOpenAIGPTLMHeadModel,
    TFRobertaForMaskedLM,
    TFRobertaForSequenceClassification,
    TFCamembertForMaskedLM,
    TFCamembertForSequenceClassification,
    TFT5WithLMHeadModel,
    TFTransfoXLLMHeadModel,
    TFXLMRobertaForMaskedLM,
...
@@ -18,8 +18,6 @@
import logging
import tensorflow as tf
from .configuration_camembert import CamembertConfig
from .file_utils import add_start_docstrings
from .modeling_tf_roberta import (
@@ -29,10 +27,11 @@ from .modeling_tf_roberta import (
    TFRobertaModel,
)
logger = logging.getLogger(__name__)
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    # "camembert-base": "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-tf_model.h5"
}
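For context on the modeling_tf_camembert.py hunk above: the TF CamemBERT classes are thin subclasses of their RoBERTa counterparts, which is why only the config import, the docstring helper, and the archive map show up in this file. A minimal sketch of what such a subclass looks like inside that module (illustrative only, not the verbatim file contents):

```python
from .configuration_camembert import CamembertConfig
from .modeling_tf_roberta import TFRobertaModel

TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {}  # to be filled in once TF weights are published


class TFCamembertModel(TFRobertaModel):
    # CamemBERT reuses the RoBERTa architecture unchanged, so the subclass only
    # swaps in its own config class and pretrained-weights map.
    config_class = CamembertConfig
    pretrained_model_archive_map = TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
```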
...
@@ -52,7 +52,6 @@ from utils_squad import (
    write_predictions,
    write_predictions_extended,
)
# The follwing import is the official SQuAD evaluation script (2.0).
# You can remove it from the dependencies if you are using this script outside of the library
# We've added it here for automated tests (see examples/test_examples.py file)
@@ -333,7 +332,8 @@ def evaluate(args, model, tokenizer, prefix=""):
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
    # Load data features from cache or dataset file
    input_file = args.predict_file if evaluate else args.train_file
@@ -366,7 +366,8 @@ def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=Fal
        torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
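The rewrapped comments above describe run_squad.py's rank-zero-first caching trick: all non-zero ranks block on the first torch.distributed.barrier() while rank 0 builds and saves the features, and rank 0's later barrier() call releases them so they simply read the cache. A self-contained sketch of the pattern (the helper name and args fields are hypothetical):

```python
import os

import torch
import torch.distributed as dist


def load_cached_features(args, build_features):
    """Build an expensive feature cache on rank 0 only; other ranks wait, then reuse it."""
    # Every rank except 0 (and -1, i.e. non-distributed) parks here first.
    if args.local_rank not in [-1, 0]:
        dist.barrier()

    if args.local_rank in [-1, 0] and not os.path.exists(args.cache_file):
        features = build_features()  # the costly preprocessing step
        torch.save(features, args.cache_file)

    # Rank 0 joins the barrier only now, which is what releases the waiting ranks:
    # the two barrier() calls together form one collective across all processes.
    if args.local_rank == 0:
        dist.barrier()

    return torch.load(args.cache_file)
```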
@@ -620,7 +621,8 @@ def main():
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will
        # download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
@@ -641,15 +643,16 @@ def main():
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will
        # download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum
    # if args.fp16 is set. Otherwise it'll default to "promote" mode, and we'll get fp32 operations.
    # Note that running `--fp16_opt_level="O2"` will remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex
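The rewrapped comment above refers to Apex AMP's handling of torch.einsum: under the default opt levels einsum falls into the "promote" category and runs in fp32, so the truncated try block presumably goes on to register it as a half function. The wrapper below is a hedged sketch of that idea, not the exact code in the commit:

```python
import torch


def maybe_keep_einsum_in_fp16(fp16_enabled):
    # With Apex AMP, torch.einsum defaults to the "promote" policy (fp32).
    # Registering it as a half function keeps it in fp16; opt_level "O2" casts
    # the whole model to half precision and makes this registration unnecessary.
    if not fp16_enabled:
        return
    try:
        from apex import amp

        amp.register_half_function(torch, "einsum")
    except ImportError:
        raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
```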
...
@@ -21,7 +21,6 @@ import logging
import math
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
# Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method)
from utils_squad_evaluate import find_all_best_thresh_v2, get_raw_scores, make_qid_to_has_ans
...