Commit c11b3e29 authored by Aymeric Augustin

Sort imports for optional third-party libraries.

These libraries aren't always installed in the virtual environment where
isort is running. Declaring them explicitly in isort's known_third_party
setting keeps these third-party imports from being mixed with local imports.
parent 2a34d5b7
@@ -95,7 +95,7 @@ jobs:
     steps:
       - checkout
       - run: sudo pip install --editable .
-      - run: sudo pip install torch tensorflow tensorboardX scikit-learn
+      - run: sudo pip install torch tensorflow
       - run: sudo pip install black git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort flake8
       - run: black --check --line-length 119 examples templates transformers utils
       - run: isort --check-only --recursive examples templates transformers utils
...
@@ -19,6 +19,7 @@ import math
 import os
 import time
+import psutil
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -27,7 +28,6 @@ from torch.utils.data import BatchSampler, DataLoader, RandomSampler
 from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm
-import psutil
 from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups
 from lm_seqs_dataset import LmSeqsDataset
 from transformers import get_linear_schedule_with_warmup
...
@@ -20,11 +20,10 @@ import logging
 import os
 import socket
+import git
 import numpy as np
 import torch
-import git
 logging.basicConfig(
     format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
...
@@ -20,11 +20,10 @@ from collections import Counter
 import torch
 import torch.nn as nn
-from torch.utils.data import Dataset
 import torchvision
 import torchvision.transforms as transforms
 from PIL import Image
+from torch.utils.data import Dataset
 POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
...
@@ -26,12 +26,12 @@ import torch
 import torch.nn.functional as F
 import torch.optim as optim
 import torch.utils.data as data
-from tqdm import tqdm, trange
 from nltk.tokenize.treebank import TreebankWordDetokenizer
-from pplm_classification_head import ClassificationHead
 from torchtext import data as torchtext_data
 from torchtext import datasets
+from tqdm import tqdm, trange
+from pplm_classification_head import ClassificationHead
 from transformers import GPT2LMHeadModel, GPT2Tokenizer
...
@@ -25,13 +25,13 @@ import random
 import numpy as np
 import torch
+from seqeval.metrics import f1_score, precision_score, recall_score
 from tensorboardX import SummaryWriter
 from torch.nn import CrossEntropyLoss
 from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
 from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm, trange
-from seqeval.metrics import f1_score, precision_score, recall_score
 from transformers import (
     WEIGHTS_NAME,
     AdamW,
...
 import os
 import tensorflow as tf
 import tensorflow_datasets
 from transformers import (
     BertConfig,
     BertForSequenceClassification,
...
@@ -9,9 +9,9 @@ import re
 import numpy as np
 import tensorflow as tf
 from absl import app, flags, logging
 from fastprogress import master_bar, progress_bar
 from seqeval import metrics
 from transformers import (
     TF2_WEIGHTS_NAME,
     BertConfig,
...
@@ -3,7 +3,21 @@ ensure_newline_before_comments = True
 force_grid_wrap = 0
 include_trailing_comma = True
 known_first_party = transformers
-known_third_party = packaging
+known_third_party =
+    fairseq
+    fastprogress
+    git
+    nltk
+    packaging
+    PIL
+    psutil
+    seqeval
+    sklearn
+    tensorboardX
+    tensorflow_datasets
+    torchtext
+    torchvision
 line_length = 119
 lines_after_imports = 2
 multi_line_output = 3
...
@@ -20,12 +20,12 @@ import argparse
 import logging
 import pathlib
-import torch
-from packaging import version
 import fairseq
+import torch
 from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
 from fairseq.modules import TransformerSentenceEncoderLayer
+from packaging import version
 from transformers.modeling_bert import (
     BertConfig,
     BertIntermediate,
...