Commit 0bab55d5 authored by thomwolf

[BIG] name change

parent 9113b50c
@@ -20,9 +20,9 @@ import unittest

 import torch
-from pytorch_pretrained_bert import BertAdam
-from pytorch_pretrained_bert import OpenAIAdam
-from pytorch_pretrained_bert.optimization import ConstantLR, WarmupLinearSchedule, WarmupConstantSchedule, \
+from pytorch_transformers import BertAdam
+from pytorch_transformers import OpenAIAdam
+from pytorch_transformers.optimization import ConstantLR, WarmupLinearSchedule, WarmupConstantSchedule, \
     WarmupCosineWithWarmupRestartsSchedule, WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule
 import numpy as np

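After the rename, the optimizers and schedules are used exactly as before, just under the new package name. A minimal sketch, assuming the pre-rename 0.6.x BertAdam keyword arguments (lr, warmup, t_total) carry over unchanged into this commit:

    import torch
    from pytorch_transformers import BertAdam

    # A toy model so the optimizer has parameters to manage.
    model = torch.nn.Linear(10, 2)

    # lr/warmup/t_total follow the pre-rename 0.6.x API (assumed unchanged here).
    optimizer = BertAdam(model.parameters(), lr=5e-5, warmup=0.1, t_total=1000)
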
@@ -20,7 +20,7 @@ from io import open
 import shutil
 import pytest

-from pytorch_pretrained_bert.tokenization_bert import (BasicTokenizer,
+from pytorch_transformers.tokenization_bert import (BasicTokenizer,
                                                      BertTokenizer,
                                                      WordpieceTokenizer,
                                                      _is_control, _is_punctuation,
@@ -51,7 +51,7 @@ class TokenizationTest(unittest.TestCase):

     @pytest.mark.slow
     def test_tokenizer_from_pretrained(self):
-        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
+        cache_dir = "/tmp/pytorch_transformers_test/"
         for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
             tokenizer = BertTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
             shutil.rmtree(cache_dir)

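The same cache_dir swap repeats for every tokenizer test below (GPT-2, OpenAI GPT, Transfo-XL, XLM, XLNet), so one sketch covers the pattern they all exercise: load a pretrained tokenizer under the new package name into a scratch cache, then remove the cache. Using "bert-base-uncased" as a key of PRETRAINED_VOCAB_ARCHIVE_MAP is an assumption; the tests themselves just take the map's first key.

    import shutil
    from pytorch_transformers.tokenization_bert import BertTokenizer

    cache_dir = "/tmp/pytorch_transformers_test/"
    # Downloads (or reuses) the vocabulary for this model into cache_dir.
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", cache_dir=cache_dir)
    print(tokenizer.tokenize("Hello, world!"))
    # The tests delete the cache so each run starts clean.
    shutil.rmtree(cache_dir)
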
@@ -20,7 +20,7 @@ import json
 import shutil
 import pytest

-from pytorch_pretrained_bert.tokenization_gpt2 import GPT2Tokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
+from pytorch_transformers.tokenization_gpt2 import GPT2Tokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP

 from .tokenization_tests_commons import create_and_check_tokenizer_commons
@@ -58,7 +58,7 @@ class GPT2TokenizationTest(unittest.TestCase):

     # @pytest.mark.slow
     def test_tokenizer_from_pretrained(self):
-        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
+        cache_dir = "/tmp/pytorch_transformers_test/"
         for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
             tokenizer = GPT2Tokenizer.from_pretrained(model_name, cache_dir=cache_dir)
             shutil.rmtree(cache_dir)

@@ -20,7 +20,7 @@ import json
 import shutil
 import pytest

-from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
+from pytorch_transformers.tokenization_openai import OpenAIGPTTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP

 from .tokenization_tests_commons import create_and_check_tokenizer_commons
@@ -60,7 +60,7 @@ class OpenAIGPTTokenizationTest(unittest.TestCase):

     @pytest.mark.slow
     def test_tokenizer_from_pretrained(self):
-        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
+        cache_dir = "/tmp/pytorch_transformers_test/"
         for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
             tokenizer = OpenAIGPTTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
             shutil.rmtree(cache_dir)

@@ -20,7 +20,7 @@ from io import open
 import shutil
 import pytest

-from pytorch_pretrained_bert.tokenization_transfo_xl import TransfoXLTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
+from pytorch_transformers.tokenization_transfo_xl import TransfoXLTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP

 from .tokenization_tests_commons import create_and_check_tokenizer_commons
@@ -61,7 +61,7 @@ class TransfoXLTokenizationTest(unittest.TestCase):

     @pytest.mark.slow
     def test_tokenizer_from_pretrained(self):
-        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
+        cache_dir = "/tmp/pytorch_transformers_test/"
         for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
             tokenizer = TransfoXLTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
             shutil.rmtree(cache_dir)

@@ -20,7 +20,7 @@ import json
 import shutil
 import pytest

-from pytorch_pretrained_bert.tokenization_xlm import XLMTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
+from pytorch_transformers.tokenization_xlm import XLMTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP

 from .tokenization_tests_commons import create_and_check_tokenizer_commons
@@ -59,7 +59,7 @@ class XLMTokenizationTest(unittest.TestCase):

     @pytest.mark.slow
     def test_tokenizer_from_pretrained(self):
-        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
+        cache_dir = "/tmp/pytorch_transformers_test/"
         for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
             tokenizer = XLMTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
             shutil.rmtree(cache_dir)

@@ -19,7 +19,7 @@ import unittest
 import shutil
 import pytest

-from pytorch_pretrained_bert.tokenization_xlnet import (XLNetTokenizer,
+from pytorch_transformers.tokenization_xlnet import (XLNetTokenizer,
                                                      PRETRAINED_VOCAB_ARCHIVE_MAP,
                                                      SPIECE_UNDERLINE)
@@ -62,7 +62,7 @@ class XLNetTokenizationTest(unittest.TestCase):

     @pytest.mark.slow
     def test_tokenizer_from_pretrained(self):
-        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
+        cache_dir = "/tmp/pytorch_transformers_test/"
         for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
             tokenizer = XLNetTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
             shutil.rmtree(cache_dir)

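Unlike the BPE/WordPiece tokenizers above, XLNet's tokenizer is sentencepiece-based; SPIECE_UNDERLINE is the marker sentencepiece prepends to word-initial pieces. A minimal sketch (that SPIECE_UNDERLINE is the single character U+2581 is an assumption about the module's internals):

    from pytorch_transformers.tokenization_xlnet import SPIECE_UNDERLINE

    # sentencepiece marks pieces that start a word with this character.
    print(repr(SPIECE_UNDERLINE))  # expected: '▁' (U+2581), per sentencepiece convention
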
@@ -37,16 +37,16 @@ from io import open
 from setuptools import find_packages, setup

 setup(
-    name="pytorch_pretrained_bert",
-    version="0.6.2",
-    author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors, Open AI team Authors",
+    name="pytorch_transformers",
+    version="0.7.0",
+    author="Thomas Wolf, Lysandre Debut, Victor Sanh, Tim Rault, Google AI Language Team Authors, Open AI team Authors",
     author_email="thomas@huggingface.co",
     description="PyTorch version of Google AI BERT model with script to load Google pre-trained models",
     long_description=open("README.md", "r", encoding='utf-8').read(),
     long_description_content_type="text/markdown",
     keywords='BERT NLP deep learning google',
     license='Apache',
-    url="https://github.com/huggingface/pytorch-pretrained-BERT",
+    url="https://github.com/huggingface/pytorch-transformers",
     packages=find_packages(exclude=["*.tests", "*.tests.*",
                                     "tests.*", "tests"]),
     install_requires=['torch>=0.4.1',
@@ -58,7 +58,7 @@ setup(
                       'sentencepiece'],
     entry_points={
         'console_scripts': [
-            "pytorch_pretrained_bert=pytorch_pretrained_bert.__main__:main",
+            "pytorch_transformers=pytorch_transformers.__main__:main",
         ]
     },
     # python_requires='>=3.5.0',
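The console_scripts entry point means that installing the package places a pytorch_transformers executable on PATH. Running that command is equivalent to the snippet below; this is standard setuptools behavior, and it assumes pytorch_transformers/__main__.py defines a main() as the entry-point string implies:

    # Equivalent of invoking the installed `pytorch_transformers` command.
    from pytorch_transformers.__main__ import main

    if __name__ == "__main__":
        main()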