Commit d62875cc authored by John Reese's avatar John Reese Committed by Facebook GitHub Bot
Browse files

[codemod][usort] apply import merging for fbcode (8 of 11)

Summary:
Applies new import merging and sorting from µsort v1.0.

When merging imports, µsort will make a best-effort to move associated
comments to match merged elements, but there are known limitations due to
the dynamic nature of Python and developer tooling. These changes should
not produce any dangerous runtime changes, but may require touch-ups to
satisfy linters and other tooling.

Note that µsort uses case-insensitive, lexicographical sorting, which
results in a different ordering compared to isort. This provides a more
consistent sorting order, matching the case-insensitive order used when
sorting import statements by module name, and ensures that "frog", "FROG",
and "Frog" always sort next to each other.

For details on µsort's sorting and merging semantics, see the user guide:
https://usort.readthedocs.io/en/stable/guide.html#sorting

Reviewed By: lisroach

Differential Revision: D36402214

fbshipit-source-id: b641bfa9d46242188524d4ae2c44998922a62b4c
parent 44f4a5ea
......@@ -6,9 +6,7 @@ import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
......
......@@ -5,9 +5,7 @@ import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
from torchaudio.datasets.utils import extract_archive
URL = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"
_CHECKSUMS = {
......
......@@ -6,9 +6,7 @@ import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
......
......@@ -6,9 +6,9 @@ from .filtering import (
bass_biquad,
biquad,
contrast,
dither,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
......@@ -24,34 +24,34 @@ from .filtering import (
)
from .functional import (
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
create_dct,
melscale_fbanks,
linear_fbanks,
DB_to_amplitude,
detect_pitch_frequency,
inverse_spectrogram,
edit_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
mask_along_axis,
mask_along_axis_iid,
mu_law_encoding,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
sliding_window_cmn,
spectrogram,
spectral_centroid,
apply_codec,
resample,
edit_distance,
pitch_shift,
rnnt_loss,
psd,
mvdr_weights_souden,
mvdr_weights_rtf,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
apply_beamforming,
sliding_window_cmn,
spectral_centroid,
spectrogram,
)
__all__ = [
......
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Tuple, Dict, Iterator
from typing import Dict, Iterator, Optional, Tuple
import torch
import torchaudio
......
......@@ -2,24 +2,24 @@ from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import RNNT, emformer_rnnt_base, emformer_rnnt_model
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
Wav2Vec2Model,
HuBERTPretrainModel,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
hubert_pretrain_model,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
from .wavernn import WaveRNN
......
......@@ -3,7 +3,7 @@
Based on https://github.com/naplab/Conv-TasNet/tree/e66d82a8f956a69749ec8a4ae382217faa097c5c
"""
from typing import Tuple, Optional
from typing import Optional, Tuple
import torch
......
......@@ -26,11 +26,10 @@
# *****************************************************************************
import warnings
from typing import Tuple, List, Optional, Union
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch import nn
from torch import nn, Tensor
from torch.nn import functional as F
......
from torch import Tensor
from torch import nn
from torch import nn, Tensor
__all__ = [
"Wav2Letter",
......
from . import utils
from .model import (
Wav2Vec2Model,
HuBERTPretrainModel,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
hubert_pretrain_model,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
__all__ = [
......
import logging
from typing import Optional, Tuple, List
from typing import List, Optional, Tuple
import torch
from torch import Tensor, nn
from torch import nn, Tensor
from torch.nn import Module, Parameter
_LG = logging.getLogger(__name__)
......
from typing import Optional, Tuple, List
from typing import List, Optional, Tuple
import torch
from torch import Tensor
......
......@@ -6,7 +6,7 @@ import re
from torch.nn import Module
from ..model import Wav2Vec2Model, wav2vec2_model
from ..model import wav2vec2_model, Wav2Vec2Model
def _parse_config(w2v_model):
......
......@@ -4,7 +4,7 @@ import logging
from torch.nn import Module
from ..model import Wav2Vec2Model, wav2vec2_model
from ..model import wav2vec2_model, Wav2Vec2Model
_LG = logging.getLogger(__name__)
......
import math
from typing import List, Tuple, Optional
from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from torch import nn
from torch import nn, Tensor
__all__ = [
"ResBlock",
......
from ._tts import (
Tacotron2TTSBundle,
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
Wav2Vec2Bundle,
Wav2Vec2ASRBundle,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_ASR_BASE_10M,
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
......
import re
from dataclasses import dataclass
from typing import Union, Optional, Dict, Any, Tuple, List
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import Tensor
from torchaudio._internal import load_state_dict_from_url
from torchaudio.functional import mu_law_decoding
from torchaudio.models import Tacotron2, WaveRNN
from torchaudio.transforms import InverseMelScale, GriffinLim
from torchaudio.transforms import GriffinLim, InverseMelScale
from . import utils
from .interface import Tacotron2TTSBundle
......
from abc import ABC, abstractmethod
from typing import Union, List, Tuple, Optional
from typing import List, Optional, Tuple, Union
from torch import Tensor
from torchaudio.models import Tacotron2
......
......@@ -2,10 +2,7 @@ import logging
import os
import torch
from torchaudio._internal import (
download_url_to_file,
module_utils as _mod_utils,
)
from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
def _get_chars():
......
from dataclasses import dataclass
from typing import Dict, Tuple, Any
from typing import Any, Dict, Tuple
import torch
from torchaudio._internal import load_state_dict_from_url
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment