Unverified commit 7732d0fe authored by Lysandre Debut, committed by GitHub

Upgrade black to version ~=22.0 (#15565)

* Upgrade black to version ~=22.0

* Check copies

* Fix code
parent d923f762
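
Editorial note: the diff below contains two kinds of mechanical changes. Most hunks follow black 22's new power-operator style: when both operands are "simple" (names, numeric literals, attribute access), black now removes the spaces around `**`. A rough sketch of the rule, with illustrative variable names only:

# Illustrative sketch of the black ~=22.0 power-operator style.
# Simple operands (names, numeric literals, attribute access) hug the operator:
capacity = int(2**hash_code_len)      # black 21.x wrote: int(2 ** hash_code_len)
window_area = config.window_size**2   # black 21.x wrote: config.window_size ** 2
# Non-simple operands (e.g. a function call) keep the surrounding spaces:
scale = base ** compute_exponent()

The remaining hunks update calls into black's own Python API; a sketch of that change follows the relevant files further down.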
@@ -175,7 +175,7 @@ class YosoLSHCumulation(torch.autograd.Function):
         use_cuda = query_mask.is_cuda
         num_hash = config["num_hash"]
         hash_code_len = config["hash_code_len"]
-        hashtable_capacity = int(2 ** hash_code_len)
+        hashtable_capacity = int(2**hash_code_len)
         if config["use_fast_hash"]:
             query_hash_code, key_hash_code = lsh_cumulation.fast_hash(
@@ -202,7 +202,7 @@ class YosoLSHCumulation(torch.autograd.Function):
         use_cuda = grad.is_cuda
         hash_code_len = config["hash_code_len"]
-        hashtable_capacity = int(2 ** hash_code_len)
+        hashtable_capacity = int(2**hash_code_len)
         if config["lsh_backward"]:
             grad_value = lsh_cumulation.lsh_cumulation(
...
@@ -214,7 +214,7 @@ def get_polynomial_decay_schedule_with_warmup(
             lr_range = lr_init - lr_end
             decay_steps = num_training_steps - num_warmup_steps
             pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
-            decay = lr_range * pct_remaining ** power + lr_end
+            decay = lr_range * pct_remaining**power + lr_end
             return decay / lr_init  # as LambdaLR multiplies by lr_init

     return LambdaLR(optimizer, lr_lambda, last_epoch)
@@ -586,7 +586,7 @@ class Adafactor(Optimizer):
                 lr = self._get_lr(group, state)
                 beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
-                update = (grad ** 2) + group["eps"][0]
+                update = (grad**2) + group["eps"][0]
                 if factored:
                     exp_avg_sq_row = state["exp_avg_sq_row"]
                     exp_avg_sq_col = state["exp_avg_sq_col"]
...
@@ -204,7 +204,7 @@ def _ffmpeg_stream(ffmpeg_command, buflen: int):
     """
     Internal function to create the generator of data through ffmpeg
     """
-    bufsize = 2 ** 24  # 16Mo
+    bufsize = 2**24  # 16Mo
     try:
         with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
             while True:
...
@@ -102,7 +102,7 @@ class BeamSearchTester:
             beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx))

         # -10.0 is removed => -9.0 is worst score
-        self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length ** beam_hyp.length_penalty))
+        self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty))

         # -5.0 is better than worst score => should not be finished
         self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length))
...
@@ -544,7 +544,7 @@ class IBertModelIntegrationTest(unittest.TestCase):
             self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4))
             # Output of the quantize Softmax should not exceed the output_bit
-            self.assertTrue(q.abs().max() < 2 ** output_bit)
+            self.assertTrue(q.abs().max() < 2**output_bit)

         array = [[i + j for j in range(10)] for i in range(-10, 10)]
         _test(array)
...
@@ -252,7 +252,7 @@ class SwinModelTest(ModelTesterMixin, unittest.TestCase):
             # check that output_attentions also work using config
             del inputs_dict["output_attentions"]
             config.output_attentions = True
-            window_size_squared = config.window_size ** 2
+            window_size_squared = config.window_size**2
             model = model_class(config)
             model.to(torch_device)
             model.eval()
...
@@ -134,7 +134,7 @@ class ViTMAEModelTester:
         patch_size = to_2tuple(self.patch_size)
         num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
         expected_seq_len = num_patches
-        expected_num_channels = self.patch_size ** 2 * self.num_channels
+        expected_num_channels = self.patch_size**2 * self.num_channels
         self.parent.assertEqual(result.logits.shape, (self.batch_size, expected_seq_len, expected_num_channels))

     def prepare_config_and_inputs_for_common(self):
...
@@ -68,7 +68,8 @@ class CopyCheckTester(unittest.TestCase):
         code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
         if overwrite_result is not None:
             expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
-        code = black.format_str(code, mode=black.FileMode([black.TargetVersion.PY35], line_length=119))
+        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
+        code = black.format_str(code, mode=mode)
         fname = os.path.join(self.transformer_dir, "new_code.py")
         with open(fname, "w", newline="\n") as f:
             f.write(code)
...
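The `black.format_str` hunks (here and in the two files that follow) replace the legacy positional `black.FileMode([...], line_length=...)` construction with keyword arguments on `black.Mode`, the name current black releases document; `target_versions` is a set of `black.TargetVersion` members. A minimal sketch of the new call, with a made-up source string:

# Minimal sketch of the black ~=22.0 formatting API used in this commit.
import black

mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
print(black.format_str("x = 2 ** 3", mode=mode))
# -> "x = 2**3\n" under the 22.x power-operator style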
@@ -88,7 +88,7 @@ def find_code_in_transformers(object_name):
     line_index = 0
     for name in parts[i + 1 :]:
         while (
-            line_index < len(lines) and re.search(fr"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
+            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
         ):
             line_index += 1
         indent += "    "
@@ -130,7 +130,8 @@ def blackify(code):
     has_indent = len(get_indent(code)) > 0
     if has_indent:
         code = f"class Bla:\n{code}"
-    result = black.format_str(code, mode=black.FileMode([black.TargetVersion.PY35], line_length=119))
+    mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
+    result = black.format_str(code, mode=mode)
     result, _ = style_docstrings_in_code(result)
     return result[len("class Bla:\n") :] if has_indent else result
...
@@ -28,7 +28,7 @@ fork_point_sha = subprocess.check_output("git merge-base master HEAD".split()).d
 modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

 joined_dirs = "|".join(sys.argv[1:])
-regex = re.compile(fr"^({joined_dirs}).*?\.py$")
+regex = re.compile(rf"^({joined_dirs}).*?\.py$")

 relevant_modified_files = [x for x in modified_files if regex.match(x)]
 print(" ".join(relevant_modified_files), end="")
@@ -147,9 +147,8 @@ def format_code_example(code: str, max_len: int, in_docstring: bool = False):
     for k, v in BLACK_AVOID_PATTERNS.items():
         full_code = full_code.replace(k, v)
     try:
-        formatted_code = black.format_str(
-            full_code, mode=black.FileMode([black.TargetVersion.PY37], line_length=line_length)
-        )
+        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=line_length)
+        formatted_code = black.format_str(full_code, mode=mode)
         error = ""
     except Exception as e:
         formatted_code = full_code
...