Commit d62875cc authored by John Reese, committed by Facebook GitHub Bot

[codemod][usort] apply import merging for fbcode (8 of 11)

Summary:
Applies new import merging and sorting from µsort v1.0.

When merging imports, µsort makes a best-effort attempt to move associated
comments along with the merged elements, but there are known limitations due
to the dynamic nature of Python and developer tooling. These changes should
not produce any dangerous runtime changes, but may require touch-ups to
satisfy linters and other tooling.
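
For illustration, the sketch below shows the kind of merge this codemod
performs. It uses stdlib imports rather than any file from this diff, and is
only an approximation of µsort's behavior:

    # Sketch of import merging (illustrative, not produced by µsort itself).
    # Before: the same module is imported twice, one import wrapped in parentheses.
    from os.path import (
        basename,
    )
    from os.path import join

    # After merging: a single statement with the imported names sorted.
    from os.path import basename, join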

Note that µsort uses case-insensitive, lexicographical sorting, which
results in a different ordering compared to isort. This provides a more
consistent sorting order, matching the case-insensitive order used when
sorting import statements by module name, and ensures that "frog", "FROG",
and "Frog" always sort next to each other.

For details on µsort's sorting and merging semantics, see the user guide:
https://usort.readthedocs.io/en/stable/guide.html#sorting

Reviewed By: lisroach

Differential Revision: D36402214

fbshipit-source-id: b641bfa9d46242188524d4ae2c44998922a62b4c
parent 44f4a5ea
@@ -6,9 +6,7 @@ import torchaudio
 from torch import Tensor
 from torch.hub import download_url_to_file
 from torch.utils.data import Dataset
-from torchaudio.datasets.utils import (
-    extract_archive,
-)
+from torchaudio.datasets.utils import extract_archive
 _RELEASE_CONFIGS = {
...
@@ -5,9 +5,7 @@ import torchaudio
 from torch import Tensor
 from torch.hub import download_url_to_file
 from torch.utils.data import Dataset
-from torchaudio.datasets.utils import (
-    extract_archive,
-)
+from torchaudio.datasets.utils import extract_archive
 URL = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"
 _CHECKSUMS = {
...
@@ -6,9 +6,7 @@ import torchaudio
 from torch import Tensor
 from torch.hub import download_url_to_file
 from torch.utils.data import Dataset
-from torchaudio.datasets.utils import (
-    extract_archive,
-)
+from torchaudio.datasets.utils import extract_archive
 _RELEASE_CONFIGS = {
...
@@ -6,9 +6,9 @@ from .filtering import (
     bass_biquad,
     biquad,
     contrast,
-    dither,
     dcshift,
     deemph_biquad,
+    dither,
     equalizer_biquad,
     filtfilt,
     flanger,
@@ -24,34 +24,34 @@ from .filtering import (
 )
 from .functional import (
     amplitude_to_DB,
+    apply_beamforming,
+    apply_codec,
     compute_deltas,
     compute_kaldi_pitch,
     create_dct,
-    melscale_fbanks,
-    linear_fbanks,
     DB_to_amplitude,
     detect_pitch_frequency,
-    inverse_spectrogram,
+    edit_distance,
     griffinlim,
+    inverse_spectrogram,
+    linear_fbanks,
     mask_along_axis,
     mask_along_axis_iid,
-    mu_law_encoding,
+    melscale_fbanks,
     mu_law_decoding,
+    mu_law_encoding,
+    mvdr_weights_rtf,
+    mvdr_weights_souden,
     phase_vocoder,
-    sliding_window_cmn,
-    spectrogram,
-    spectral_centroid,
-    apply_codec,
-    resample,
-    edit_distance,
     pitch_shift,
-    rnnt_loss,
     psd,
-    mvdr_weights_souden,
-    mvdr_weights_rtf,
+    resample,
+    rnnt_loss,
     rtf_evd,
     rtf_power,
-    apply_beamforming,
+    sliding_window_cmn,
+    spectral_centroid,
+    spectrogram,
 )
 __all__ = [
...
 from __future__ import annotations
 from dataclasses import dataclass
-from typing import Optional, Tuple, Dict, Iterator
+from typing import Dict, Iterator, Optional, Tuple
 import torch
 import torchaudio
...
@@ -2,24 +2,24 @@ from .conformer import Conformer
 from .conv_tasnet import ConvTasNet
 from .deepspeech import DeepSpeech
 from .emformer import Emformer
-from .rnnt import RNNT, emformer_rnnt_base, emformer_rnnt_model
+from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
 from .rnnt_decoder import Hypothesis, RNNTBeamSearch
 from .tacotron2 import Tacotron2
 from .wav2letter import Wav2Letter
 from .wav2vec2 import (
-    Wav2Vec2Model,
-    HuBERTPretrainModel,
-    wav2vec2_model,
-    wav2vec2_base,
-    wav2vec2_large,
-    wav2vec2_large_lv60k,
     hubert_base,
     hubert_large,
-    hubert_xlarge,
-    hubert_pretrain_model,
     hubert_pretrain_base,
     hubert_pretrain_large,
+    hubert_pretrain_model,
     hubert_pretrain_xlarge,
+    hubert_xlarge,
+    HuBERTPretrainModel,
+    wav2vec2_base,
+    wav2vec2_large,
+    wav2vec2_large_lv60k,
+    wav2vec2_model,
+    Wav2Vec2Model,
 )
 from .wavernn import WaveRNN
...
@@ -3,7 +3,7 @@
 Based on https://github.com/naplab/Conv-TasNet/tree/e66d82a8f956a69749ec8a4ae382217faa097c5c
 """
-from typing import Tuple, Optional
+from typing import Optional, Tuple
 import torch
...
@@ -26,11 +26,10 @@
 # *****************************************************************************
 import warnings
-from typing import Tuple, List, Optional, Union
+from typing import List, Optional, Tuple, Union
 import torch
-from torch import Tensor
-from torch import nn
+from torch import nn, Tensor
 from torch.nn import functional as F
...
-from torch import Tensor
-from torch import nn
+from torch import nn, Tensor
 __all__ = [
     "Wav2Letter",
...
 from . import utils
 from .model import (
-    Wav2Vec2Model,
-    HuBERTPretrainModel,
-    wav2vec2_model,
-    wav2vec2_base,
-    wav2vec2_large,
-    wav2vec2_large_lv60k,
     hubert_base,
     hubert_large,
-    hubert_xlarge,
-    hubert_pretrain_model,
     hubert_pretrain_base,
     hubert_pretrain_large,
+    hubert_pretrain_model,
     hubert_pretrain_xlarge,
+    hubert_xlarge,
+    HuBERTPretrainModel,
+    wav2vec2_base,
+    wav2vec2_large,
+    wav2vec2_large_lv60k,
+    wav2vec2_model,
+    Wav2Vec2Model,
 )
 __all__ = [
...
 import logging
-from typing import Optional, Tuple, List
+from typing import List, Optional, Tuple
 import torch
-from torch import Tensor, nn
+from torch import nn, Tensor
 from torch.nn import Module, Parameter
 _LG = logging.getLogger(__name__)
...
-from typing import Optional, Tuple, List
+from typing import List, Optional, Tuple
 import torch
 from torch import Tensor
...
@@ -6,7 +6,7 @@ import re
 from torch.nn import Module
-from ..model import Wav2Vec2Model, wav2vec2_model
+from ..model import wav2vec2_model, Wav2Vec2Model
 def _parse_config(w2v_model):
...
@@ -4,7 +4,7 @@ import logging
 from torch.nn import Module
-from ..model import Wav2Vec2Model, wav2vec2_model
+from ..model import wav2vec2_model, Wav2Vec2Model
 _LG = logging.getLogger(__name__)
...
 import math
-from typing import List, Tuple, Optional
+from typing import List, Optional, Tuple
 import torch
 import torch.nn.functional as F
-from torch import Tensor
-from torch import nn
+from torch import nn, Tensor
 __all__ = [
     "ResBlock",
...
 from ._tts import (
-    Tacotron2TTSBundle,
     TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
     TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
     TACOTRON2_WAVERNN_CHAR_LJSPEECH,
     TACOTRON2_WAVERNN_PHONE_LJSPEECH,
+    Tacotron2TTSBundle,
 )
 from ._wav2vec2.impl import (
-    Wav2Vec2Bundle,
-    Wav2Vec2ASRBundle,
-    WAV2VEC2_BASE,
-    WAV2VEC2_LARGE,
-    WAV2VEC2_LARGE_LV60K,
-    WAV2VEC2_ASR_BASE_10M,
+    HUBERT_ASR_LARGE,
+    HUBERT_ASR_XLARGE,
+    HUBERT_BASE,
+    HUBERT_LARGE,
+    HUBERT_XLARGE,
+    VOXPOPULI_ASR_BASE_10K_DE,
+    VOXPOPULI_ASR_BASE_10K_EN,
+    VOXPOPULI_ASR_BASE_10K_ES,
+    VOXPOPULI_ASR_BASE_10K_FR,
+    VOXPOPULI_ASR_BASE_10K_IT,
     WAV2VEC2_ASR_BASE_100H,
+    WAV2VEC2_ASR_BASE_10M,
     WAV2VEC2_ASR_BASE_960H,
-    WAV2VEC2_ASR_LARGE_10M,
     WAV2VEC2_ASR_LARGE_100H,
+    WAV2VEC2_ASR_LARGE_10M,
     WAV2VEC2_ASR_LARGE_960H,
-    WAV2VEC2_ASR_LARGE_LV60K_10M,
     WAV2VEC2_ASR_LARGE_LV60K_100H,
+    WAV2VEC2_ASR_LARGE_LV60K_10M,
     WAV2VEC2_ASR_LARGE_LV60K_960H,
+    WAV2VEC2_BASE,
+    WAV2VEC2_LARGE,
+    WAV2VEC2_LARGE_LV60K,
     WAV2VEC2_XLSR53,
-    VOXPOPULI_ASR_BASE_10K_EN,
-    VOXPOPULI_ASR_BASE_10K_ES,
-    VOXPOPULI_ASR_BASE_10K_DE,
-    VOXPOPULI_ASR_BASE_10K_FR,
-    VOXPOPULI_ASR_BASE_10K_IT,
-    HUBERT_BASE,
-    HUBERT_LARGE,
-    HUBERT_XLARGE,
-    HUBERT_ASR_LARGE,
-    HUBERT_ASR_XLARGE,
+    Wav2Vec2ASRBundle,
+    Wav2Vec2Bundle,
 )
 from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
...
 import re
 from dataclasses import dataclass
-from typing import Union, Optional, Dict, Any, Tuple, List
+from typing import Any, Dict, List, Optional, Tuple, Union
 import torch
 from torch import Tensor
 from torchaudio._internal import load_state_dict_from_url
 from torchaudio.functional import mu_law_decoding
 from torchaudio.models import Tacotron2, WaveRNN
-from torchaudio.transforms import InverseMelScale, GriffinLim
+from torchaudio.transforms import GriffinLim, InverseMelScale
 from . import utils
 from .interface import Tacotron2TTSBundle
...
 from abc import ABC, abstractmethod
-from typing import Union, List, Tuple, Optional
+from typing import List, Optional, Tuple, Union
 from torch import Tensor
 from torchaudio.models import Tacotron2
...
@@ -2,10 +2,7 @@ import logging
 import os
 import torch
-from torchaudio._internal import (
-    download_url_to_file,
-    module_utils as _mod_utils,
-)
+from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
 def _get_chars():
...
 from dataclasses import dataclass
-from typing import Dict, Tuple, Any
+from typing import Any, Dict, Tuple
 import torch
 from torchaudio._internal import load_state_dict_from_url
...