"...git@developer.sourcefind.cn:chenpangpang/open-webui.git" did not exist on "d17dc5924686e3c183d80a6f522cbcf7a22e2f0f"
Commit 0bab55d5 authored by thomwolf
Browse files

[BIG] name change

parent 9113b50c
version: 2 version: 2
jobs: jobs:
build_py3: build_py3:
working_directory: ~/pytorch-pretrained-BERT working_directory: ~/pytorch-transformers
docker: docker:
- image: circleci/python:3.5 - image: circleci/python:3.5
steps: steps:
...@@ -10,11 +10,11 @@ jobs: ...@@ -10,11 +10,11 @@ jobs:
- run: sudo pip install pytest codecov pytest-cov - run: sudo pip install pytest codecov pytest-cov
- run: sudo pip install spacy ftfy==4.4.3 - run: sudo pip install spacy ftfy==4.4.3
- run: sudo python -m spacy download en - run: sudo python -m spacy download en
- run: python -m pytest -sv ./pytorch_pretrained_bert/tests/ --cov - run: python -m pytest -sv ./pytorch_transformers/tests/ --cov
- run: codecov - run: codecov
parallelism: 4 parallelism: 4
build_py2: build_py2:
working_directory: ~/pytorch-pretrained-BERT working_directory: ~/pytorch-transformers
docker: docker:
- image: circleci/python:2.7 - image: circleci/python:2.7
steps: steps:
...@@ -23,7 +23,7 @@ jobs: ...@@ -23,7 +23,7 @@ jobs:
- run: sudo pip install pytest codecov pytest-cov - run: sudo pip install pytest codecov pytest-cov
- run: sudo pip install spacy ftfy==4.4.3 - run: sudo pip install spacy ftfy==4.4.3
- run: sudo python -m spacy download en - run: sudo python -m spacy download en
- run: python -m pytest -sv ./pytorch_pretrained_bert/tests/ --cov - run: python -m pytest -sv ./pytorch_transformers/tests/ --cov
- run: codecov - run: codecov
parallelism: 4 parallelism: 4
workflows: workflows:
......
[run] [run]
source=pytorch_pretrained_bert source=pytorch_transformers
[report] [report]
exclude_lines = exclude_lines =
pragma: no cover pragma: no cover
......
This diff is collapsed.
...@@ -2,6 +2,6 @@ FROM pytorch/pytorch:latest ...@@ -2,6 +2,6 @@ FROM pytorch/pytorch:latest
RUN git clone https://github.com/NVIDIA/apex.git && cd apex && python setup.py install --cuda_ext --cpp_ext RUN git clone https://github.com/NVIDIA/apex.git && cd apex && python setup.py install --cuda_ext --cpp_ext
RUN pip install pytorch-pretrained-bert RUN pip install pytorch_transformers
WORKDIR /workspace WORKDIR /workspace
\ No newline at end of file
...@@ -12,7 +12,7 @@ from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, Subse ...@@ -12,7 +12,7 @@ from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, Subse
from torch.utils.data.distributed import DistributedSampler from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_pretrained_bert import BertForSequenceClassification, BertTokenizer from pytorch_transformers import BertForSequenceClassification, BertTokenizer
from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics
......
import torch import torch
from torch.nn import functional as F from torch.nn import functional as F
from pytorch_pretrained_bert import XLNetModel, XLNetLMHeadModel, XLNetTokenizer from pytorch_transformers import XLNetModel, XLNetLMHeadModel, XLNetTokenizer
import logging import logging
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)
......
...@@ -13,10 +13,10 @@ from torch.utils.data import DataLoader, Dataset, RandomSampler ...@@ -13,10 +13,10 @@ from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm from tqdm import tqdm
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling_bert import BertForPreTraining from pytorch_transformers.modeling_bert import BertForPreTraining
from pytorch_pretrained_bert.tokenization_bert import BertTokenizer from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next") InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")
......
...@@ -5,7 +5,7 @@ from tempfile import TemporaryDirectory ...@@ -5,7 +5,7 @@ from tempfile import TemporaryDirectory
import shelve import shelve
from random import random, randrange, randint, shuffle, choice from random import random, randrange, randint, shuffle, choice
from pytorch_pretrained_bert.tokenization_bert import BertTokenizer from pytorch_transformers.tokenization_bert import BertTokenizer
import numpy as np import numpy as np
import json import json
import collections import collections
......
...@@ -29,10 +29,10 @@ from torch.utils.data import DataLoader, Dataset, RandomSampler ...@@ -29,10 +29,10 @@ from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange from tqdm import tqdm, trange
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling_bert import BertForPreTraining from pytorch_transformers.modeling_bert import BertForPreTraining
from pytorch_pretrained_bert.tokenization_bert import BertTokenizer from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', datefmt='%m/%d/%Y %H:%M:%S',
......
...@@ -34,10 +34,10 @@ from torch.nn import CrossEntropyLoss, MSELoss ...@@ -34,10 +34,10 @@ from torch.nn import CrossEntropyLoss, MSELoss
from tensorboardX import SummaryWriter from tensorboardX import SummaryWriter
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling_bert import BertForSequenceClassification from pytorch_transformers.modeling_bert import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization_bert import BertTokenizer from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics
......
...@@ -28,8 +28,8 @@ import torch ...@@ -28,8 +28,8 @@ import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization_bert import BertTokenizer from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_pretrained_bert.modeling_bert import BertModel from pytorch_transformers.modeling_bert import BertModel
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S', datefmt = '%m/%d/%Y %H:%M:%S',
......
...@@ -33,10 +33,10 @@ from tqdm import tqdm, trange ...@@ -33,10 +33,10 @@ from tqdm import tqdm, trange
from tensorboardX import SummaryWriter from tensorboardX import SummaryWriter
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling_bert import BertForQuestionAnswering from pytorch_transformers.modeling_bert import BertForQuestionAnswering
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
from pytorch_pretrained_bert.tokenization_bert import BertTokenizer from pytorch_transformers.tokenization_bert import BertTokenizer
from utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions from utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions
......
...@@ -32,10 +32,10 @@ from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, ...@@ -32,10 +32,10 @@ from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
from torch.utils.data.distributed import DistributedSampler from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange from tqdm import tqdm, trange
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling_bert import BertForMultipleChoice, BertConfig from pytorch_transformers.modeling_bert import BertForMultipleChoice, BertConfig
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
from pytorch_pretrained_bert.tokenization_bert import BertTokenizer from pytorch_transformers.tokenization_bert import BertTokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S', datefmt = '%m/%d/%Y %H:%M:%S',
......
...@@ -8,7 +8,7 @@ import torch ...@@ -8,7 +8,7 @@ import torch
import torch.nn.functional as F import torch.nn.functional as F
import numpy as np import numpy as np
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S', datefmt = '%m/%d/%Y %H:%M:%S',
......
...@@ -39,7 +39,7 @@ import torch ...@@ -39,7 +39,7 @@ import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset) TensorDataset)
from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, from pytorch_transformers import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME) OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME)
ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz" ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
......
...@@ -28,7 +28,7 @@ import math ...@@ -28,7 +28,7 @@ import math
import torch import torch
from pytorch_pretrained_bert import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S', datefmt = '%m/%d/%Y %H:%M:%S',
......
...@@ -34,10 +34,10 @@ from torch.nn import CrossEntropyLoss, MSELoss ...@@ -34,10 +34,10 @@ from torch.nn import CrossEntropyLoss, MSELoss
from tensorboardX import SummaryWriter from tensorboardX import SummaryWriter
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling_xlnet import XLNetForSequenceClassification from pytorch_transformers.modeling_xlnet import XLNetForSequenceClassification
from pytorch_pretrained_bert.tokenization_xlnet import XLNetTokenizer from pytorch_transformers.tokenization_xlnet import XLNetTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics
......
...@@ -33,10 +33,10 @@ from tqdm import tqdm, trange ...@@ -33,10 +33,10 @@ from tqdm import tqdm, trange
from tensorboardX import SummaryWriter from tensorboardX import SummaryWriter
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling_xlnet import BertForQuestionAnswering from pytorch_transformers.modeling_xlnet import BertForQuestionAnswering
from pytorch_pretrained_bert.tokenization_xlnet import XLNetTokenizer from pytorch_transformers.tokenization_xlnet import XLNetTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
from utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions from utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions
......
# coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import json
import random
import shutil
import pytest
import torch
from pytorch_transformers import PretrainedConfig, PreTrainedModel
from pytorch_transformers.modeling_bert import BertModel, BertConfig, PRETRAINED_MODEL_ARCHIVE_MAP, PRETRAINED_CONFIG_ARCHIVE_MAP
class ModelUtilsTest(unittest.TestCase):
    """Integration checks for ``from_pretrained`` loading of configs and models."""

    def test_model_from_pretrained(self):
        """Load the first pretrained shortcut name and verify that both the
        config and the model load, and that keyword overrides passed to
        ``from_pretrained`` are propagated into the resulting model config."""
        # Only exercise the first archive entry to keep the test cheap.
        shortcut_names = list(PRETRAINED_MODEL_ARCHIVE_MAP)[:1]
        for shortcut in shortcut_names:
            # The config alone should load and be a PretrainedConfig instance.
            loaded_config = BertConfig.from_pretrained(shortcut)
            self.assertIsNotNone(loaded_config)
            self.assertIsInstance(loaded_config, PretrainedConfig)

            # The full model should load and be a PreTrainedModel instance.
            loaded_model = BertModel.from_pretrained(shortcut)
            self.assertIsNotNone(loaded_model)
            self.assertIsInstance(loaded_model, PreTrainedModel)

            # Keyword overrides must appear on the model's attached config,
            # and that config must equal one built with the same overrides.
            loaded_config = BertConfig.from_pretrained(
                shortcut, output_attentions=True, output_hidden_states=True)
            loaded_model = BertModel.from_pretrained(
                shortcut, output_attentions=True, output_hidden_states=True)
            self.assertEqual(loaded_model.config.output_attentions, True)
            self.assertEqual(loaded_model.config.output_hidden_states, True)
            self.assertEqual(loaded_model.config, loaded_config)
# Allow running this test module directly: `python <this_file>.py`
# delegates test discovery and execution to unittest's CLI runner.
if __name__ == "__main__":
    unittest.main()
...@@ -24,7 +24,7 @@ import math ...@@ -24,7 +24,7 @@ import math
import collections import collections
from io import open from io import open
from pytorch_pretrained_bert.tokenization_bert import BasicTokenizer, whitespace_tokenize from pytorch_transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment