Unverified commit 6b04d61c authored by Kashif Rasul, committed by GitHub

[Styling] stylify using ruff (#5841)

* ruff format

* no need to use doc-builder's black styling, as the docs are now styled by ruff

* make fix-copies

* comment

* use run_ruff
parent 9c7f7fc4
@@ -146,9 +146,7 @@ class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
-            self.betas = (
-                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
-            )
+            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
         elif beta_schedule == "squaredcos_cap_v2":
             # Glide cosine schedule
             self.betas = betas_for_alpha_bar(num_train_timesteps)
@@ -132,9 +132,7 @@ class PNDMScheduler(SchedulerMixin, ConfigMixin):
             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
-            self.betas = (
-                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
-            )
+            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
         elif beta_schedule == "squaredcos_cap_v2":
             # Glide cosine schedule
             self.betas = betas_for_alpha_bar(num_train_timesteps)
@@ -134,9 +134,7 @@ class RePaintScheduler(SchedulerMixin, ConfigMixin):
             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
-            self.betas = (
-                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
-            )
+            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
         elif beta_schedule == "squaredcos_cap_v2":
             # Glide cosine schedule
             self.betas = betas_for_alpha_bar(num_train_timesteps)
@@ -79,9 +79,7 @@ class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
         # TODO(Patrick) better comments + non-PyTorch
         # postprocess model score
-        log_mean_coeff = (
-            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
-        )
+        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
         std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
         std = std.flatten()
         while len(std.shape) < len(score.shape):
@@ -162,9 +162,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
-            self.betas = (
-                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
-            )
+            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
         elif beta_schedule == "squaredcos_cap_v2":
             # Glide cosine schedule
             self.betas = betas_for_alpha_bar(num_train_timesteps)
@@ -87,9 +87,9 @@ def get_relative_imports(module_file):
         content = f.read()

     # Imports of the form `import .xxx`
-    relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
+    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
     # Imports of the form `from .xxx import yyy`
-    relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
+    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
     # Unique-ify
     return list(set(relative_imports))
@@ -131,9 +131,9 @@ def check_imports(filename):
         content = f.read()

     # Imports of the form `import xxx`
-    imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
+    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
     # Imports of the form `from xxx import yyy`
-    imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
+    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
     # Only keep the top-level module
     imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
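Several hunks in this commit only add the `r` prefix to regex patterns. As a hedged illustration (not part of the diff), both forms match identically today; the non-raw form merely relies on Python passing unrecognized escapes such as `\s` through unchanged, which recent interpreters flag:

```python
import re

content = "from .models import UNet2DModel"

# Non-raw pattern: "\s", "\S" and "\." are not valid string escapes. The regex
# still works because Python leaves unknown escapes untouched, but newer Python
# versions warn about them (SyntaxWarning in 3.12, DeprecationWarning before).
legacy = re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)

# Raw-string pattern: byte-for-byte the same regex, with no warning.
fixed = re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)

assert legacy == fixed == ["models"]
```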
@@ -162,8 +162,8 @@ class PriorTransformerIntegrationTests(unittest.TestCase):
     @parameterized.expand(
         [
             # fmt: off
-            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
-            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
+            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
+            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
             # fmt: on
         ]
     )
@@ -457,8 +457,16 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
     @parameterized.expand(
         [
             # fmt: off
-            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
-            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
+            [
+                33,
+                [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824],
+                [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824],
+            ],
+            [
+                47,
+                [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089],
+                [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131],
+            ],
             # fmt: on
         ]
     )
@@ -504,8 +504,16 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
     @parameterized.expand(
         [
             # fmt: off
-            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
-            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
+            [
+                33,
+                [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814],
+                [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824],
+            ],
+            [
+                47,
+                [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085],
+                [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131],
+            ],
             # fmt: on
         ]
     )
@@ -687,8 +703,16 @@ class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase):
     @parameterized.expand(
         [
             # fmt: off
-            [33, [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078], [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824]],
-            [47, [0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529], [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089]],
+            [
+                33,
+                [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078],
+                [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824],
+            ],
+            [
+                47,
+                [0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529],
+                [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089],
+            ],
             # fmt: on
         ]
     )
@@ -710,8 +734,16 @@ class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase):
     @parameterized.expand(
         [
            # fmt: off
-            [33, [-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097], [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078]],
-            [47, [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531]],
+            [
+                33,
+                [-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097],
+                [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078],
+            ],
+            [
+                47,
+                [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531],
+                [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531],
+            ],
             # fmt: on
         ]
     )
@@ -732,7 +764,7 @@ class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase):
     @parameterized.expand(
         [
             # fmt: off
-            [13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]],
+            [13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]],
             [37, [-0.1820, -0.4345, -0.0455, -0.2923, -0.8035, -0.5089, -0.4795, -0.3106]],
             # fmt: on
         ]
@@ -19,8 +19,6 @@ import sys
 import tempfile
 import unittest

-import black
-
 git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
 sys.path.append(os.path.join(git_repo_path, "utils"))
@@ -65,8 +63,7 @@ class CopyCheckTester(unittest.TestCase):
         code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
         if overwrite_result is not None:
             expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
-            mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
-            code = black.format_str(code, mode=mode)
+            code = check_copies.run_ruff(code)
         fname = os.path.join(self.diffusers_dir, "new_code.py")
         with open(fname, "w", newline="\n") as f:
             f.write(code)
@@ -759,9 +759,10 @@ class PipelineTesterMixin:
             for k, v in pipe.components.items()
             if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload
         ]
-        self.assertTrue(
-            all(v.device.type == "cpu" for v in offloaded_modules)
-        ), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}"
+        (
+            self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)),
+            f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}",
+        )

     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
@@ -36,7 +36,7 @@ CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
 # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
 # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
-_re_checkpoint = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
+_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

 CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
@@ -17,9 +17,7 @@ import argparse
 import glob
 import os
 import re
+import subprocess

-import black
-from doc_builder.style_doc import style_docstrings_in_code

 # All paths are set with the intent you should run this script from the root of the repo with the command
@@ -46,7 +44,12 @@ def find_code_in_diffusers(object_name):
     if i >= len(parts):
         raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

-    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
+    with open(
+        os.path.join(DIFFUSERS_PATH, f"{module}.py"),
+        "r",
+        encoding="utf-8",
+        newline="\n",
+    ) as f:
         lines = f.readlines()

     # Now let's find the class / func in the code!
@@ -90,17 +93,29 @@ def get_indent(code):
     return ""


-def blackify(code):
+def run_ruff(code):
+    command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"]
+    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
+    stdout, _ = process.communicate(input=code.encode())
+    return stdout.decode()
+
+
+def stylify(code: str) -> str:
     """
-    Applies the black part of our `make style` command to `code`.
+    Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`.
+    As `ruff` does not provide a python api this cannot be done on the fly.
+
+    Args:
+        code (`str`): The code to format.
+
+    Returns:
+        `str`: The formatted code.
     """
     has_indent = len(get_indent(code)) > 0
     if has_indent:
         code = f"class Bla:\n{code}"
-    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
-    result = black.format_str(code, mode=mode)
-    result, _ = style_docstrings_in_code(result)
-    return result[len("class Bla:\n") :] if has_indent else result
+    formatted_code = run_ruff(code)
+    return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code


 def is_copy_consistent(filename, overwrite=False):
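As a quick, hypothetical usage sketch (not part of the diff): `stylify` wraps indented code in a dummy `class Bla:` header so that a method body, which is not valid top-level Python, can still be piped through `ruff format -` by `run_ruff`, and the header is stripped afterwards. The snippet assumes it is run from the repo root, with `ruff` installed and the repo's `pyproject.toml` present.

```python
import sys

sys.path.append("utils")  # assumption: executed from the repo root so check_copies is importable

import check_copies

# An indented method body on its own is not parseable as a module, so stylify()
# prepends "class Bla:\n", formats the wrapped code via run_ruff() (which pipes
# it through `ruff format -` over stdin/stdout), then drops the dummy header again.
body = "    def step(self,x ):\n        return (x +1)\n"
print(check_copies.stylify(body))
# Expected: the same method body, reformatted by ruff and still indented one level.
```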
@@ -160,9 +175,9 @@ def is_copy_consistent(filename, overwrite=False):
             theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
             theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

-            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
+            # stylify after replacement. To be able to do that, we need the header (class or function definition)
             # from the previous line
-            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
+            theoretical_code = stylify(lines[start_index - 1] + theoretical_code)
             theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

             # Test for a diff and act accordingly.
@@ -197,7 +212,11 @@ def check_copies(overwrite: bool = False):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
+    parser.add_argument(
+        "--fix_and_overwrite",
+        action="store_true",
+        help="Whether to fix inconsistencies.",
+    )
     args = parser.parse_args()

     check_copies(args.fix_and_overwrite)
@@ -36,9 +36,9 @@ _re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\
 # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
 _re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
 # Catches a line with an object between quotes and a comma: "MyModel",
-_re_quote_object = re.compile('^\s+"([^"]+)",')
+_re_quote_object = re.compile(r'^\s+"([^"]+)",')
 # Catches a line with objects between brackets only: ["foo", "bar"],
-_re_between_brackets = re.compile("^\s+\[([^\]]+)\]")
+_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
 # Catches a line with from foo import bar, bla, boo
 _re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
 # Catches a line with try:
@@ -79,7 +79,7 @@ def parse_init(init_file):
             # If we have everything on a single line, let's deal with it.
             if _re_one_line_import_struct.search(line):
                 content = _re_one_line_import_struct.search(line).groups()[0]
-                imports = re.findall("\[([^\]]+)\]", content)
+                imports = re.findall(r"\[([^\]]+)\]", content)
                 for imp in imports:
                     objects.extend([obj[1:-1] for obj in imp.split(", ")])
                 line_index += 1
@@ -533,7 +533,7 @@ def find_all_documented_objects():
     for doc_file in Path(PATH_TO_DOC).glob("**/*.md"):
         with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
             content = f.read()
-        raw_doc_objs = re.findall("\[\[autodoc\]\]\s+(\S+)\s+", content)
+        raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content)
         documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
     return documented_obj