Unverified Commit 91a67b75 authored by Julien Plu, committed by GitHub

Use LF instead of os.linesep (#8491)

parent 27b3ff31
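
For context: in Python text mode, open() with the default newline=None translates "\n" to os.linesep when writing (so "\r\n" on Windows) and applies universal-newline translation when reading. Passing newline="\n" turns that translation off, so these repo-maintenance scripts read and write LF on every platform, and the files they regenerate stay byte-identical across operating systems. A minimal sketch of the difference (the demo file path is illustrative, not part of the commit):

    import os
    import tempfile

    path = os.path.join(tempfile.gettempdir(), "newline_demo.txt")  # hypothetical demo file

    # Default text mode: each "\n" is rewritten to os.linesep ("\r\n" on Windows).
    with open(path, "w", encoding="utf-8") as f:
        f.write("first\nsecond\n")

    # With newline="\n", no translation happens: LF is written verbatim on any OS.
    with open(path, "w", encoding="utf-8", newline="\n") as f:
        f.write("first\nsecond\n")

    # Reading in binary mode shows exactly what landed on disk.
    with open(path, "rb") as f:
        print(f.read())  # b'first\nsecond\n' on every platform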
@@ -42,7 +42,7 @@ def find_code_in_transformers(object_name):
             f"`object_name` should begin with the name of a module of transformers but got {object_name}."
         )

-    with open(os.path.join(TRANSFORMERS_PATH, f"{module}.py"), "r", encoding="utf-8") as f:
+    with open(os.path.join(TRANSFORMERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
         lines = f.readlines()
     # Now let's find the class / func in the code!
@@ -82,10 +82,10 @@ def blackify(code):
     code = f"class Bla:\n{code}"
     with tempfile.TemporaryDirectory() as d:
         fname = os.path.join(d, "tmp.py")
-        with open(fname, "w", encoding="utf-8") as f:
+        with open(fname, "w", encoding="utf-8", newline="\n") as f:
             f.write(code)
         os.system(f"black -q --line-length 119 --target-version py35 {fname}")
-        with open(fname, "r", encoding="utf-8") as f:
+        with open(fname, "r", encoding="utf-8", newline="\n") as f:
             result = f.read()
     return result[len("class Bla:\n") :] if has_indent else result
@@ -96,7 +96,7 @@ def is_copy_consistent(filename, overwrite=False):
     Return the differences or overwrites the content depending on `overwrite`.
     """
-    with open(filename, "r", encoding="utf-8") as f:
+    with open(filename, "r", encoding="utf-8", newline="\n") as f:
         lines = f.readlines()
     diffs = []
     line_index = 0
@@ -150,7 +150,7 @@ def is_copy_consistent(filename, overwrite=False):
     if overwrite and len(diffs) > 0:
         # Warn the user a file has been modified.
         print(f"Detected changes, rewriting {filename}.")
-        with open(filename, "w", encoding="utf-8") as f:
+        with open(filename, "w", encoding="utf-8", newline="\n") as f:
             f.writelines(lines)
     return diffs
@@ -176,7 +176,7 @@ def get_model_list():
     # If the introduction or the conclusion of the list change, the prompts may need to be updated.
     _start_prompt = "🤗 Transformers currently provides the following architectures"
     _end_prompt = "1. Want to contribute a new model?"
-    with open(os.path.join(REPO_PATH, "README.md"), "r", encoding="utf-8") as f:
+    with open(os.path.join(REPO_PATH, "README.md"), "r", encoding="utf-8", newline="\n") as f:
         lines = f.readlines()
     # Find the start of the list.
     start_index = 0
@@ -254,7 +254,7 @@ def check_model_list_copy(overwrite=False, max_per_line=119):
     """ Check the model lists in the README and index.rst are consistent and maybe `overwrite`. """
    _start_prompt = " This list is updated automatically from the README"
     _end_prompt = ".. toctree::"
-    with open(os.path.join(PATH_TO_DOCS, "index.rst"), "r", encoding="utf-8") as f:
+    with open(os.path.join(PATH_TO_DOCS, "index.rst"), "r", encoding="utf-8", newline="\n") as f:
         lines = f.readlines()
     # Find the start of the list.
     start_index = 0
@@ -279,7 +279,7 @@ def check_model_list_copy(overwrite=False, max_per_line=119):
     if converted_list != rst_list:
         if overwrite:
-            with open(os.path.join(PATH_TO_DOCS, "index.rst"), "w", encoding="utf-8") as f:
+            with open(os.path.join(PATH_TO_DOCS, "index.rst"), "w", encoding="utf-8", newline="\n") as f:
                 f.writelines(lines[:start_index] + [converted_list] + lines[end_index:])
         else:
             raise ValueError(
...
@@ -166,7 +166,7 @@ DUMMY_FUNCTION = {
 def read_init():
     """ Read the init and extracts PyTorch, TensorFlow, SentencePiece and Tokenizers objects. """
-    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8") as f:
+    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
         lines = f.readlines()
     line_index = 0
@@ -321,21 +321,21 @@ def check_dummies(overwrite=False):
     tf_file = os.path.join(path, "dummy_tf_objects.py")
     flax_file = os.path.join(path, "dummy_flax_objects.py")

-    with open(sentencepiece_file, "r", encoding="utf-8") as f:
+    with open(sentencepiece_file, "r", encoding="utf-8", newline="\n") as f:
         actual_sentencepiece_dummies = f.read()
-    with open(tokenizers_file, "r", encoding="utf-8") as f:
+    with open(tokenizers_file, "r", encoding="utf-8", newline="\n") as f:
         actual_tokenizers_dummies = f.read()
-    with open(pt_file, "r", encoding="utf-8") as f:
+    with open(pt_file, "r", encoding="utf-8", newline="\n") as f:
         actual_pt_dummies = f.read()
-    with open(tf_file, "r", encoding="utf-8") as f:
+    with open(tf_file, "r", encoding="utf-8", newline="\n") as f:
         actual_tf_dummies = f.read()
-    with open(flax_file, "r", encoding="utf-8") as f:
+    with open(flax_file, "r", encoding="utf-8", newline="\n") as f:
         actual_flax_dummies = f.read()

     if sentencepiece_dummies != actual_sentencepiece_dummies:
         if overwrite:
             print("Updating transformers.utils.dummy_sentencepiece_objects.py as the main __init__ has new objects.")
-            with open(sentencepiece_file, "w", encoding="utf-8") as f:
+            with open(sentencepiece_file, "w", encoding="utf-8", newline="\n") as f:
                 f.write(sentencepiece_dummies)
         else:
             raise ValueError(
@@ -346,7 +346,7 @@ def check_dummies(overwrite=False):
     if tokenizers_dummies != actual_tokenizers_dummies:
         if overwrite:
             print("Updating transformers.utils.dummy_tokenizers_objects.py as the main __init__ has new objects.")
-            with open(tokenizers_file, "w", encoding="utf-8") as f:
+            with open(tokenizers_file, "w", encoding="utf-8", newline="\n") as f:
                 f.write(tokenizers_dummies)
         else:
             raise ValueError(
@@ -357,7 +357,7 @@ def check_dummies(overwrite=False):
     if pt_dummies != actual_pt_dummies:
         if overwrite:
             print("Updating transformers.utils.dummy_pt_objects.py as the main __init__ has new objects.")
-            with open(pt_file, "w", encoding="utf-8") as f:
+            with open(pt_file, "w", encoding="utf-8", newline="\n") as f:
                 f.write(pt_dummies)
         else:
             raise ValueError(
@@ -368,7 +368,7 @@ def check_dummies(overwrite=False):
     if tf_dummies != actual_tf_dummies:
         if overwrite:
             print("Updating transformers.utils.dummy_tf_objects.py as the main __init__ has new objects.")
-            with open(tf_file, "w", encoding="utf-8") as f:
+            with open(tf_file, "w", encoding="utf-8", newline="\n") as f:
                 f.write(tf_dummies)
         else:
             raise ValueError(
@@ -379,7 +379,7 @@ def check_dummies(overwrite=False):
     if flax_dummies != actual_flax_dummies:
         if overwrite:
             print("Updating transformers.utils.dummy_flax_objects.py as the main __init__ has new objects.")
-            with open(flax_file, "w", encoding="utf-8") as f:
+            with open(flax_file, "w", encoding="utf-8", newline="\n") as f:
                 f.write(flax_dummies)
         else:
             raise ValueError(
...
@@ -197,7 +197,7 @@ def get_model_doc_files():
 def find_tested_models(test_file):
     """ Parse the content of test_file to detect what's in all_model_classes"""
     # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class
-    with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8") as f:
+    with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
         content = f.read()
     all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
     # Check with one less parenthesis
@@ -255,7 +255,7 @@ def check_all_models_are_tested():
 def find_documented_classes(doc_file):
     """ Parse the content of doc_file to detect which classes it documents"""
-    with open(os.path.join(PATH_TO_DOC, doc_file), "r", encoding="utf-8") as f:
+    with open(os.path.join(PATH_TO_DOC, doc_file), "r", encoding="utf-8", newline="\n") as f:
         content = f.read()
     return re.findall(r"autoclass:: transformers.(\S+)\s+", content)
@@ -360,7 +360,7 @@ _re_decorator = re.compile(r"^\s*@(\S+)\s+$")
 def check_decorator_order(filename):
     """ Check that in the test file `filename` the slow decorator is always last."""
-    with open(filename, "r", encoding="utf-8") as f:
+    with open(filename, "r", encoding="utf-8", newline="\n") as f:
         lines = f.readlines()
     decorator_before = None
     errors = []
...
@@ -357,14 +357,14 @@ doc_styler = DocstringStyler()
 def style_rst_file(doc_file, max_len=119, check_only=False):
     """ Style one rst file `doc_file` to `max_len`."""
-    with open(doc_file, "r", encoding="utf-8") as f:
+    with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
         doc = f.read()
     clean_doc = rst_styler.style(doc, max_len=max_len)

     diff = clean_doc != doc
     if not check_only and diff:
         print(f"Overwriting content of {doc_file}.")
-        with open(doc_file, "w", encoding="utf-8") as f:
+        with open(doc_file, "w", encoding="utf-8", newline="\n") as f:
             f.write(clean_doc)

     return diff
@@ -404,7 +404,7 @@ def style_docstring(docstring, max_len=119):
 def style_file_docstrings(code_file, max_len=119, check_only=False):
     """Style all docstrings in `code_file` to `max_len`."""
-    with open(code_file, "r", encoding="utf-8") as f:
+    with open(code_file, "r", encoding="utf-8", newline="\n") as f:
         code = f.read()
     splits = code.split('"""')
     splits = [
@@ -416,7 +416,7 @@ def style_file_docstrings(code_file, max_len=119, check_only=False):
     diff = clean_code != code
     if not check_only and diff:
         print(f"Overwriting content of {code_file}.")
-        with open(code_file, "w", encoding="utf-8") as f:
+        with open(code_file, "w", encoding="utf-8", newline="\n") as f:
             f.write(clean_code)

     return diff
...