Commit d62875cc authored by John Reese, committed by Facebook GitHub Bot

[codemod][usort] apply import merging for fbcode (8 of 11)

Summary:
Applies new import merging and sorting from µsort v1.0.

When merging imports, µsort makes a best effort to move associated
comments along with the merged elements, but there are known limitations
due to the dynamic nature of Python and developer tooling. These changes
should not produce any dangerous runtime changes, but may require
touch-ups to satisfy linters and other tooling.
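
As an illustration of the merge behavior described above (the module name
mypkg.utils below is hypothetical, not taken from this diff): repeated
imports from the same module are combined into a single statement.

    # Before merging: two separate imports from the same (hypothetical) module.
    from mypkg.utils import save_wav
    from mypkg.utils import load_wav

    # After merging: one statement, with members sorted case-insensitively,
    # matching the hunks below.
    from mypkg.utils import load_wav, save_wav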

Note that µsort uses case-insensitive, lexicographical sorting, which
results in a different ordering compared to isort. This provides a more
consistent sorting order, matching the case-insensitive order used when
sorting import statements by module name, and ensures that "frog", "FROG",
and "Frog" always sort next to each other.

For details on µsort's sorting and merging semantics, see the user guide:
https://usort.readthedocs.io/en/stable/guide.html#sorting

Reviewed By: lisroach

Differential Revision: D36402214

fbshipit-source-id: b641bfa9d46242188524d4ae2c44998922a62b4c
parent 44f4a5ea
@@ -5,21 +5,18 @@ import torch
 import torchaudio
 from parameterized import parameterized
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
-    skipIfNoExec,
-    skipIfNoSox,
     get_wav_data,
-    save_wav,
     load_wav,
+    save_wav,
+    skipIfNoExec,
+    skipIfNoSox,
     sox_utils,
+    TempDirMixin,
     torch_script,
+    TorchaudioTestCase,
 )
-from .common import (
-    name_func,
-    get_enc_params,
-)
+from .common import get_enc_params, name_func
 def py_info_func(filepath: str) -> torchaudio.backend.sox_io_backend.AudioMetaData:
...
-from .backend_utils import (
-    set_audio_backend,
-)
+from .backend_utils import set_audio_backend
 from .case_utils import (
-    TempDirMixin,
     HttpServerMixin,
-    TestBaseMixin,
-    PytorchTestCase,
-    TorchaudioTestCase,
     is_ffmpeg_available,
+    PytorchTestCase,
     skipIfNoCtcDecoder,
     skipIfNoCuda,
     skipIfNoExec,
-    skipIfNoModule,
+    skipIfNoFFmpeg,
     skipIfNoKaldi,
-    skipIfNoSox,
-    skipIfRocm,
+    skipIfNoModule,
     skipIfNoQengine,
-    skipIfNoFFmpeg,
+    skipIfNoSox,
     skipIfPy310,
+    skipIfRocm,
+    TempDirMixin,
+    TestBaseMixin,
+    TorchaudioTestCase,
 )
-from .data_utils import (
-    get_asset_path,
-    get_whitenoise,
-    get_sinusoid,
-    get_spectrogram,
-)
+from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
 from .func_utils import torch_script
-from .image_utils import (
-    save_image,
-    get_image,
-)
+from .image_utils import get_image, save_image
 from .parameterized_utils import load_params, nested_params
-from .wav_utils import (
-    get_wav_data,
-    normalize_wav,
-    load_wav,
-    save_wav,
-)
+from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
 __all__ = [
     "get_asset_path",
...
@@ -9,7 +9,11 @@ import unittest
 import torch
 from torch.testing._internal.common_utils import TestCase as PytorchTestCase
-from torchaudio._internal.module_utils import is_module_available, is_sox_available, is_kaldi_available
+from torchaudio._internal.module_utils import (
+    is_kaldi_available,
+    is_module_available,
+    is_sox_available,
+)
 from .backend_utils import set_audio_backend
...
 import os.path
-from typing import Union, Optional
+from typing import Optional, Union
 import torch
...
@@ -2,17 +2,14 @@
 import torchaudio.compliance.kaldi
 from parameterized import parameterized
 from torchaudio_unittest.common_utils import (
-    TestBaseMixin,
-    TempDirMixin,
-    load_params,
-    skipIfNoExec,
     get_asset_path,
+    load_params,
     load_wav,
+    skipIfNoExec,
+    TempDirMixin,
+    TestBaseMixin,
 )
-from torchaudio_unittest.common_utils.kaldi_utils import (
-    convert_args,
-    run_kaldi,
-)
+from torchaudio_unittest.common_utils.kaldi_utils import convert_args, run_kaldi
 class Kaldi(TempDirMixin, TestBaseMixin):
...
@@ -3,11 +3,11 @@ from pathlib import Path
 from torchaudio.datasets import cmuarctic
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
-    save_wav,
     normalize_wav,
+    save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
...
@@ -2,10 +2,7 @@ import os
 from pathlib import Path
 from torchaudio.datasets import CMUDict
-from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
-)
+from torchaudio_unittest.common_utils import TempDirMixin, TorchaudioTestCase
 def get_mock_dataset(root_dir, return_punc=False):
...
 import csv
 import os
 from pathlib import Path
-from typing import Tuple, Dict
+from typing import Dict, Tuple
 from torch import Tensor
 from torchaudio.datasets import COMMONVOICE
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
-    save_wav,
     normalize_wav,
+    save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
 _ORIGINAL_EXT_AUDIO = COMMONVOICE._ext_audio
...
@@ -3,10 +3,10 @@ from pathlib import Path
 import pytest
 from torchaudio.datasets import dr_vctk
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
     save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
...
@@ -3,11 +3,11 @@ from pathlib import Path
 from torchaudio.datasets import gtzan
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
-    save_wav,
     normalize_wav,
+    save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
...
@@ -3,11 +3,11 @@ from pathlib import Path
 from torchaudio.datasets import librispeech
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
-    save_wav,
     normalize_wav,
+    save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
 # Used to generate a unique transcript for each dummy audio file
...
@@ -3,11 +3,11 @@ from pathlib import Path
 from torchaudio.datasets.libritts import LIBRITTS
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
-    save_wav,
     normalize_wav,
+    save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
 _UTTERANCE_IDS = [
...
@@ -4,11 +4,11 @@ from pathlib import Path
 from torchaudio.datasets import ljspeech
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
     normalize_wav,
     save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
 _TRANSCRIPTS = [
...
@@ -5,10 +5,10 @@ from pathlib import Path
 from parameterized import parameterized
 from torchaudio.datasets import quesst14
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
     save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
...
@@ -3,11 +3,11 @@ from pathlib import Path
 from torchaudio.datasets import speechcommands
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
     normalize_wav,
     save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
 _LABELS = [
...
@@ -3,7 +3,13 @@ import platform
 from pathlib import Path
 from torchaudio.datasets import tedlium
-from torchaudio_unittest.common_utils import TempDirMixin, TorchaudioTestCase, get_whitenoise, save_wav, skipIfNoSox
+from torchaudio_unittest.common_utils import (
+    get_whitenoise,
+    save_wav,
+    skipIfNoSox,
+    TempDirMixin,
+    TorchaudioTestCase,
+)
 # Used to generate a unique utterance for each dummy audio file
 _UTTERANCES = [
...
@@ -3,11 +3,11 @@ from pathlib import Path
 from torchaudio.datasets import vctk
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
-    save_wav,
     normalize_wav,
+    save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
 # Used to generate a unique transcript for each dummy audio file
...
@@ -3,11 +3,11 @@ from pathlib import Path
 from torchaudio.datasets import yesno
 from torchaudio_unittest.common_utils import (
-    TempDirMixin,
-    TorchaudioTestCase,
     get_whitenoise,
-    save_wav,
     normalize_wav,
+    save_wav,
+    TempDirMixin,
+    TorchaudioTestCase,
 )
...
@@ -5,9 +5,9 @@ from unittest.mock import patch
 import torch
 from parameterized import parameterized
 from torchaudio._internal.module_utils import is_module_available
-from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
-from .utils import MockSentencePieceProcessor, MockCustomDataset, MockDataloader
+from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
+from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
 if is_module_available("pytorch_lightning", "sentencepiece"):
     from asr.emformer_rnnt.librispeech.lightning import LibriSpeechRNNTModule
...
@@ -5,9 +5,9 @@ from unittest.mock import patch
 import torch
 from parameterized import parameterized
 from torchaudio._internal.module_utils import is_module_available
-from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
-from .utils import MockSentencePieceProcessor, MockCustomDataset, MockDataloader
+from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
+from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
 if is_module_available("pytorch_lightning", "sentencepiece"):
     from asr.emformer_rnnt.mustc.lightning import MuSTCRNNTModule
...