Unverified commit 053efc5d, authored by Sylvain Gugger and committed by GitHub

Fix imports in conversion scripts (#9674)

parent 2390c16f
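For context: these conversion scripts are meant to be executed directly (`python convert_xxx_original_tf_checkpoint_to_pytorch.py ...`). A file run that way becomes the top-level module `__main__` with no parent package, so relative imports such as `from . import BertConfig` fail at startup. A minimal sketch of the failure mode and the fix — the file and model names here are illustrative, not taken from the commit:

```python
# convert_example.py -- illustrative standalone script, not a file from this commit

# Relative form: only valid when this file is imported as a submodule of the
# `transformers` package. Run directly (`python convert_example.py`), the file
# is the top-level module "__main__", so Python raises:
#   ImportError: attempted relative import with no known parent package
# from . import BertConfig, BertModel

# Absolute form: resolves against the installed `transformers` package and
# works both when the file is imported and when it is run as a script.
from transformers import BertConfig, BertModel

if __name__ == "__main__":
    config = BertConfig()        # default BERT-base configuration
    model = BertModel(config)    # randomly initialised weights
    print(type(model).__name__)  # -> BertModel
```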
@@ -19,8 +19,8 @@ import argparse
 import torch
-from ...utils import logging
-from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
+from transformers import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
+from transformers.utils import logging
 logging.set_verbosity_info()

@@ -23,9 +23,15 @@ import fairseq
 import torch
 from packaging import version
-from ...utils import logging
-from . import BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer
-from .modeling_bart import _make_linear_from_emb
+from transformers import (
+    BartConfig,
+    BartForConditionalGeneration,
+    BartForSequenceClassification,
+    BartModel,
+    BartTokenizer,
+)
+from transformers.models.bart.modeling_bart import _make_linear_from_emb
+from transformers.utils import logging
 FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]

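Once a script like the BART one above has written its output folder, the result loads like any other checkpoint. A usage sketch, assuming a hypothetical dump directory (the tokenizer is fetched from the Hub since the conversion output may not include tokenizer files):

```python
from transformers import BartForConditionalGeneration, BartTokenizer

# "converted/bart.large.cnn" is a hypothetical pytorch_dump_folder_path
# produced by the conversion script above.
model = BartForConditionalGeneration.from_pretrained("converted/bart.large.cnn")
tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")

inputs = tokenizer(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=32)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```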
@@ -28,8 +28,8 @@ import re
 import tensorflow as tf
 import torch
-from ...utils import logging
-from . import BertConfig, BertModel
+from transformers import BertConfig, BertModel
+from transformers.utils import logging
 logging.set_verbosity_info()

@@ -19,8 +19,8 @@ import argparse
 import torch
-from ...utils import logging
-from . import BertConfig, BertForPreTraining, load_tf_weights_in_bert
+from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
+from transformers.utils import logging
 logging.set_verbosity_info()

@@ -22,7 +22,7 @@ import numpy as np
 import tensorflow as tf
 import torch
-from . import BertModel
+from transformers import BertModel
 def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):

@@ -18,8 +18,8 @@ import argparse
 import torch
-from ...models.bart import BartConfig, BartForConditionalGeneration
-from ...utils import logging
+from transformers import BartConfig, BartForConditionalGeneration
+from transformers.utils import logging
 logging.set_verbosity_info()

@@ -17,7 +17,7 @@ import os
 import torch
-from ...file_utils import WEIGHTS_NAME
+from transformers.file_utils import WEIGHTS_NAME
 DIALOGPT_MODELS = ["small", "medium", "large"]

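`WEIGHTS_NAME` (the constant `"pytorch_model.bin"` at this point in the library) is the file name `from_pretrained` looks for, which is why the DialoGPT script imports it instead of hard-coding a string. A rough sketch of the save side of that contract — the directory and stand-in model here are illustrative, not the script's code:

```python
import os

import torch
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.file_utils import WEIGHTS_NAME  # "pytorch_model.bin"

# Illustrative: write weights under the canonical file name so that
# `GPT2LMHeadModel.from_pretrained(dump_dir)` can find them later.
dump_dir = "converted/dialogpt-small"  # hypothetical output directory
os.makedirs(dump_dir, exist_ok=True)

model = GPT2LMHeadModel(GPT2Config())  # stand-in for the converted model
torch.save(model.state_dict(), os.path.join(dump_dir, WEIGHTS_NAME))
model.config.save_pretrained(dump_dir)  # from_pretrained also needs config.json
```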
@@ -19,8 +19,7 @@ from pathlib import Path
 import torch
 from torch.serialization import default_restore_location
-from ...models.bert import BertConfig
-from . import DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader
+from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader
 CheckpointState = collections.namedtuple(

@@ -19,8 +19,8 @@ import argparse
 import torch
-from ...utils import logging
-from . import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
+from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
+from transformers.utils import logging
 logging.set_verbosity_info()

@@ -31,10 +31,11 @@ import torch
 from fairseq import hub_utils
 from fairseq.data.dictionary import Dictionary
-from ...file_utils import WEIGHTS_NAME
-from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE
-from ...utils import logging
-from . import VOCAB_FILES_NAMES, FSMTConfig, FSMTForConditionalGeneration
+from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
+from transformers import FSMTConfig, FSMTForConditionalGeneration
+from transformers.file_utils import WEIGHTS_NAME
+from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
+from transformers.utils import logging
 logging.set_verbosity_warning()

@@ -23,6 +23,7 @@ from ...file_utils import _BaseLazyModule, is_tf_available, is_tokenizers_availa
 _import_structure = {
     "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
+    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
     "tokenization_funnel": ["FunnelTokenizer"],
 }

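The `_import_structure` dict above feeds the `_BaseLazyModule` machinery imported at the top of this `__init__`: submodules are only imported when one of their exported names is first accessed, and an entry with an empty list, like the conversion script here, registers the submodule without eagerly importing it or exporting anything. A simplified sketch of that pattern, not the actual `_BaseLazyModule` code:

```python
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    """Simplified stand-in for `_BaseLazyModule`: defer submodule imports
    until one of their exported names is actually accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        if name in self._name_to_module:
            # First access: import the defining submodule, then delegate.
            module = importlib.import_module(
                "." + self._name_to_module[name], self.__name__
            )
            return getattr(module, name)
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
```

Under this scheme an empty-list entry never triggers an import at all; it simply records that the submodule exists in the package layout, presumably so consistency checks see every file without pulling in its optional heavy dependencies.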
@@ -16,14 +16,14 @@
 import argparse
-import logging
 import torch
-from . import FunnelConfig, FunnelForPreTraining, load_tf_weights_in_funnel
+from transformers import FunnelConfig, FunnelForPreTraining, load_tf_weights_in_funnel
+from transformers.utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):

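Note the second change in this hunk: the script previously configured the standard-library `logging` module and now uses the `transformers.utils.logging` wrapper instead, so its verbosity is controlled the same way as the rest of the library. The replacement API in brief (both helpers exist in the library):

```python
from transformers.utils import logging

# Replaces the stdlib setup `logging.basicConfig(level=logging.INFO)`:
# set INFO verbosity on every logger in the `transformers` namespace.
logging.set_verbosity_info()

# Library code obtains namespaced loggers through the same wrapper.
logger = logging.get_logger(__name__)
logger.info("Loading TF checkpoint")
```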
@@ -19,9 +19,9 @@ import argparse
 import torch
-from ...file_utils import CONFIG_NAME, WEIGHTS_NAME
-from ...utils import logging
-from . import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
+from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
+from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
+from transformers.utils import logging
 logging.set_verbosity_info()

@@ -20,7 +20,7 @@ import argparse
 import pytorch_lightning as pl
 import torch
-from . import LongformerForQuestionAnswering, LongformerModel
+from transformers import LongformerForQuestionAnswering, LongformerModel
 class LightningModel(pl.LightningModule):

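For context on the hunk above: the Longformer script only uses `pytorch_lightning` to rehydrate a Lightning-trained checkpoint so the underlying `transformers` weights can be re-exported. A rough, hypothetical sketch of that pattern — the checkpoint path, base model, and wrapper internals here are assumptions, not the script's actual code:

```python
import pytorch_lightning as pl
import torch
from transformers import LongformerForQuestionAnswering


class LightningModel(pl.LightningModule):
    # Thin wrapper whose only job is to make the Lightning checkpoint's
    # "model.*" state-dict keys line up with a transformers model.
    def __init__(self):
        super().__init__()
        self.model = LongformerForQuestionAnswering.from_pretrained(
            "allenai/longformer-base-4096"  # assumed base checkpoint
        )


lightning_model = LightningModel()
ckpt = torch.load("longformer_qa.ckpt", map_location="cpu")  # hypothetical path
lightning_model.load_state_dict(ckpt["state_dict"])
# Keep only the inner transformers model, saved in standard HF format.
lightning_model.model.save_pretrained("converted/longformer-qa")
```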
@@ -16,14 +16,14 @@
 import argparse
-import logging
 import torch
-from . import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
+from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
+from transformers.utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):

@@ -17,7 +17,7 @@ import os
 from pathlib import Path
 from typing import List, Tuple
-from .convert_marian_to_pytorch import (
+from transformers.models.marian.convert_marian_to_pytorch import (
     FRONT_MATTER_TEMPLATE,
     _parse_readme,
     convert_all_sentencepiece_models,

@@ -26,8 +26,8 @@ import numpy as np
 import torch
 from tqdm import tqdm
-from ...hf_api import HfApi
-from . import MarianConfig, MarianMTModel, MarianTokenizer
+from transformers import MarianConfig, MarianMTModel, MarianTokenizer
+from transformers.hf_api import HfApi
 def remove_suffix(text: str, suffix: str):

@@ -16,9 +16,8 @@ import argparse
 import torch
-from ..bart import BartForConditionalGeneration
-from ..bart.convert_bart_original_pytorch_checkpoint_to_pytorch import remove_ignore_keys_
-from . import MBartConfig
+from transformers import BartForConditionalGeneration, MBartConfig
+from transformers.models.bart.convert_bart_original_pytorch_checkpoint_to_pytorch import remove_ignore_keys_
 def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro"):

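`remove_ignore_keys_`, reused here from the BART conversion script, strips fairseq bookkeeping entries from a checkpoint's state dict before the weights are mapped onto the Hugging Face model; the trailing underscore signals in-place mutation. Roughly, with an illustrative rather than exhaustive key list:

```python
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no counterpart in the
    Hugging Face model (illustrative key list, not the script's exact one)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)  # tolerate keys absent from this checkpoint
```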
@@ -16,8 +16,8 @@ import argparse
 import torch
-from ...utils import logging
-from . import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
+from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
+from transformers.utils import logging
 logging.set_verbosity_info()

@@ -22,8 +22,8 @@ import tensorflow as tf
 import torch
 from tqdm import tqdm
-from . import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
-from .configuration_pegasus import DEFAULTS, task_specific_params
+from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
+from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
 PATTERNS = [
