Commit 631be270 authored by Aymeric Augustin

Fix E722 flake8 warnings (x26).

parent b0f7db73
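
For context, flake8's E722 flags bare except: clauses. A bare except catches everything, including SystemExit and KeyboardInterrupt, so a Ctrl-C inside the guarded block can be silently swallowed. A minimal sketch of the problem and of the narrowed form used throughout this commit (risky_operation is a hypothetical stand-in, not code from the patch):

def risky_operation():
    # Hypothetical stand-in for the guarded code in the patch.
    raise ValueError("expected failure")

# Bare except (what E722 flags): catches *everything*, including
# KeyboardInterrupt and SystemExit, so a Ctrl-C inside the guarded
# block would be silently swallowed.
try:
    risky_operation()
except:  # noqa: E722 -- kept only to illustrate the warning
    pass

# Narrowed form, as applied in this commit: name the exception you
# actually expect and let everything else propagate.
try:
    risky_operation()
except ValueError:
    pass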
@@ -44,7 +44,7 @@ from transformers import (
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
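
This hunk, and the near-identical ones below, narrows the SummaryWriter fallback import. Standalone, the idiom looks like this; except ImportError only fires when torch.utils.tensorboard is genuinely absent, so a real bug inside either package still surfaces instead of triggering the fallback:

# Prefer the native PyTorch writer; fall back to tensorboardX only
# when the torch.utils.tensorboard module is missing.
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

writer = SummaryWriter()  # same API either way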
@@ -37,7 +37,7 @@ from utils import logger
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -67,7 +67,7 @@ from ..utils_squad_evaluate import main as evaluate_on_squad
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -62,7 +62,7 @@ from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_trans
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -697,8 +697,8 @@ def run_pplm_example(
             print("= Perturbed generated text {} =".format(i + 1))
             print(pert_gen_text)
             print()
-        except:
-            pass
+        except Exception as exc:
+            print("Ignoring error while generating perturbed text:", exc)
         # keep the prefix, perturbed seq, original seq for each index
         generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text))
...
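
The PPLM change replaces a silent except: pass with a handler that reports the failure and moves on. A self-contained sketch of the report-and-continue pattern (the sample data is made up):

samples = ["good", "also good", None]  # None stands in for a failing case

for i, sample in enumerate(samples):
    try:
        print("= Sample {} =".format(i + 1))
        print(sample.upper())  # raises AttributeError for None
        print()
    except Exception as exc:
        # Report and continue instead of a bare except + pass, so the
        # failure is visible in the output rather than swallowed.
        print("Ignoring error while processing sample:", exc)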
@@ -285,7 +285,7 @@ def train_discriminator(
         for i, line in enumerate(f):
             try:
                 data.append(eval(line))
-            except:
+            except Exception:
                 print("Error evaluating line {}: {}".format(i, line))
                 continue
     x = []
@@ -303,7 +303,7 @@ def train_discriminator(
                     continue
                 x.append(seq)
                 y.append(d["label"])
-            except:
+            except Exception:
                 print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
                 pass
@@ -343,7 +343,7 @@ def train_discriminator(
                     continue
                 x.append(seq)
                 y.append(int(np.sum(d["label"]) > 0))
-            except:
+            except Exception:
                 print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
                 pass
@@ -402,7 +402,7 @@ def train_discriminator(
                 x.append(seq)
                 y.append(class2idx[label])
-            except:
+            except Exception:
                 print("Error tokenizing line {}, skipping it".format(i))
                 pass
...
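
All four train_discriminator hunks share the same shape: parse or tokenize one line, and on failure report it and skip to the next. A self-contained sketch of that loop (the sample lines are made up; except Exception rather than a bare except means Ctrl-C still aborts the run):

lines = ['{"text": "ok", "label": 1}', "not valid python {"]

data = []
for i, line in enumerate(lines):
    try:
        # eval of a trusted dataset line, mirroring the original script;
        # ast.literal_eval would be the safer drop-in alternative.
        data.append(eval(line))
    except Exception:
        print("Error evaluating line {}: {}".format(i, line))
        continue

print(data)  # [{'text': 'ok', 'label': 1}]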
@@ -64,7 +64,7 @@ from transformers import glue_processors as processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -63,7 +63,7 @@ from transformers import (
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -48,7 +48,7 @@ from utils_multiple_choice import convert_examples_to_features, processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -64,7 +64,7 @@ from transformers.data.processors.squad import SquadResult, SquadV1Processor, Sq
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -52,7 +52,7 @@ from transformers import xnli_processors as processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -63,7 +63,7 @@ from utils_squad_evaluate import main as evaluate_on_squad
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
...
@@ -6,12 +6,12 @@ __version__ = "2.3.0"
 # and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
 try:
     import absl.logging
+except ImportError:
+    pass
+else:
     absl.logging.set_verbosity("info")
     absl.logging.set_stderrthreshold("info")
     absl.logging._warn_preinit_stderr = False
-except:
-    pass
 import logging
...
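
The __init__.py change does more than rename the handler: moving the configuration calls into an else block shrinks the try body to the single import, so only the import itself is guarded and a failure inside the set_verbosity calls is no longer mistaken for a missing package. The resulting idiom:

# Only the import is guarded. The else block runs solely when the
# import succeeded, and any exception it raises propagates normally.
try:
    import absl.logging
except ImportError:
    pass
else:
    absl.logging.set_verbosity("info")
    absl.logging.set_stderrthreshold("info")
    absl.logging._warn_preinit_stderr = False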
@@ -205,10 +205,8 @@ class HfFolder:
         try:
             with open(cls.path_token, "r") as f:
                 return f.read()
-        except:
-            # this is too wide. When Py2 is dead use:
-            # `except FileNotFoundError:` instead
-            return None
+        except FileNotFoundError:
+            pass

     @classmethod
     def delete_token(cls):
@@ -218,5 +216,5 @@ class HfFolder:
         """
         try:
             os.remove(cls.path_token)
-        except:
-            return
+        except FileNotFoundError:
+            pass
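
Both HfFolder methods now treat a missing token file as the ordinary case, exactly as the old in-code comment suggested once Python 2 support was dropped. A runnable Python 3 sketch (the path is made up):

import os

token_path = "/tmp/nonexistent_token_for_demo"  # hypothetical path

def get_token():
    try:
        with open(token_path, "r") as f:
            return f.read()
    except FileNotFoundError:
        # Falling through returns None, matching the old `return None`.
        pass

def delete_token():
    try:
        os.remove(token_path)
    except FileNotFoundError:
        # Removing an already-absent file is a no-op.
        pass

print(get_token())  # -> None
delete_token()      # no exception even though the file is missing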
@@ -439,7 +439,7 @@ class PreTrainedModel(nn.Module):
         if state_dict is None and not from_tf:
             try:
                 state_dict = torch.load(resolved_archive_file, map_location="cpu")
-            except:
+            except Exception:
                 raise OSError(
                     "Unable to load weights from pytorch checkpoint file. "
                     "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
...
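
Here the handler stays broad on purpose: whatever torch.load raises is translated into a single, well-explained OSError. A minimal sketch of the catch-and-translate pattern (load_checkpoint and the file read are stand-ins, not the transformers code, and the "from exc" chaining is an extra touch that preserves the original traceback):

def load_checkpoint(path):
    try:
        # Stand-in for torch.load(...); any failure lands in the handler.
        with open(path, "rb") as f:
            return f.read()
    except Exception as exc:
        # except Exception (not bare except) so Ctrl-C still aborts,
        # while every loading failure becomes one well-explained error.
        raise OSError("Unable to load weights from checkpoint file.") from exc

try:
    load_checkpoint("/tmp/missing_checkpoint.bin")
except OSError as err:
    print(err)            # the friendly message
    print(err.__cause__)  # the original FileNotFoundError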
@@ -333,13 +333,13 @@ class TFCommonTestCases:
             # We used to fall back to just synthetically creating a dummy tensor of ones:
             try:
                 x = wte(input_ids, mode="embedding")
-            except:
+            except Exception:
                 try:
                     x = wte([input_ids], mode="embedding")
-                except:
+                except Exception:
                     try:
                         x = wte([input_ids, None, None, None], mode="embedding")
-                    except:
+                    except Exception:
                         if hasattr(self.model_tester, "embedding_size"):
                             x = tf.ones(input_ids.shape + [self.model_tester.embedding_size], dtype=tf.dtypes.float32)
                         else:
...
@@ -168,11 +168,12 @@ class CTRLTokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)
...
@@ -178,11 +178,12 @@ class GPT2Tokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)
...
@@ -136,11 +136,12 @@ class OpenAIGPTTokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)
...
@@ -683,11 +683,12 @@ class XLMTokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)
...
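
The four tokenizer hunks above apply the same restructuring to the BPE merge loop: the else branch runs only when word.index succeeded, which is exactly where the old extend-and-advance lines belonged, and ValueError is the only exception list.index raises for a missing element. A self-contained sketch of one merge pass (merge_pair is a hypothetical extraction of the loop, with the advance logic filled in from the surrounding tokenizer code):

def merge_pair(word, first, second):
    """One BPE merge pass: replace adjacent (first, second) with first+second."""
    new_word = []
    i = 0
    while i < len(word):
        try:
            j = word.index(first, i)
        except ValueError:
            # `first` does not occur again: copy the tail and stop.
            new_word.extend(word[i:])
            break
        else:
            # Found at j: copy everything before it, then stand on it.
            new_word.extend(word[i:j])
            i = j

        if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
            new_word.append(first + second)
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    return new_word

print(merge_pair(["h", "e", "l", "l", "o"], "l", "l"))  # ['h', 'e', 'll', 'o']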