Commit fee994ce authored by CodemodService FBSourceBlackLinterBot's avatar CodemodService FBSourceBlackLinterBot Committed by Facebook GitHub Bot
Browse files

[AutoAccept][Codemod][FBSourceBlackLinter] Daily `arc lint --take BLACK`

Summary:
Meta:
**If you take no action, this diff will be automatically accepted on 2022-06-23.**
(To remove yourself from auto-accept diffs and just let them all land, add yourself to [this Butterfly rule](https://www.internalfb.com/butterfly/rule/904302247110220))

Produced by `tools/arcanist/lint/codemods/black-fbsource`.

#nocancel

Rules run:
- CodemodTransformerSimpleShell

Config Oncall: [lint](https://our.intern.facebook.com/intern/oncall3/?shortname=lint)
CodemodConfig: [CodemodConfigFBSourceBlackLinter](https://www.internalfb.com/code/www/flib/intern/codemod_service/config/fbsource_arc_f/CodemodConfigFBSourceBlackLinter.php)
ConfigType: php
Sandcastle URL: https://www.internalfb.com/intern/sandcastle/job/13510799586951394/
This diff was automatically created with CodemodService.
To learn more about CodemodService, check out the [CodemodService wiki](https://fburl.com/CodemodService).

_____

## Questions / Comments / Feedback?

**[Click here to give feedback about this diff](https://www.internalfb.com/codemod_service/feedback?sandcastle_job_id=13510799586951394).**

* Returning this diff to the author or abandoning it will only cause it to be regenerated in the future.
* Do **NOT** post in the CodemodService Feedback group about this specific diff.

drop-conflicts

Reviewed By: adamjernst

Differential Revision: D37375235

fbshipit-source-id: 3d7eb39e5c0539a78d1412f37562dec90b0fc759
parent b92a8a09
...@@ -2,13 +2,7 @@ import os ...@@ -2,13 +2,7 @@ import os
from pathlib import Path from pathlib import Path
from torchaudio.datasets import speechcommands from torchaudio.datasets import speechcommands
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_LABELS = [ _LABELS = [
"bed", "bed",
......
...@@ -3,13 +3,7 @@ import platform ...@@ -3,13 +3,7 @@ import platform
from pathlib import Path from pathlib import Path
from torchaudio.datasets import tedlium from torchaudio.datasets import tedlium
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_whitenoise, save_wav, skipIfNoSox, TempDirMixin, TorchaudioTestCase
get_whitenoise,
save_wav,
skipIfNoSox,
TempDirMixin,
TorchaudioTestCase,
)
# Used to generate a unique utterance for each dummy audio file # Used to generate a unique utterance for each dummy audio file
_UTTERANCES = [ _UTTERANCES = [
......
...@@ -2,13 +2,7 @@ import os ...@@ -2,13 +2,7 @@ import os
from pathlib import Path from pathlib import Path
from torchaudio.datasets import vctk from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
# Used to generate a unique transcript for each dummy audio file # Used to generate a unique transcript for each dummy audio file
_TRANSCRIPT = [ _TRANSCRIPT = [
......
...@@ -2,13 +2,7 @@ import os ...@@ -2,13 +2,7 @@ import os
from pathlib import Path from pathlib import Path
from torchaudio.datasets import yesno from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_data(root_dir, labels): def get_mock_data(root_dir, labels):
......
import os import os
from source_separation.utils.dataset import wsj0mix from source_separation.utils.dataset import wsj0mix
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_FILENAMES = [ _FILENAMES = [
......
import torch import torch
from torchaudio_unittest.common_utils import PytorchTestCase from torchaudio_unittest.common_utils import PytorchTestCase
from .tacotron2_loss_impl import ( from .tacotron2_loss_impl import Tacotron2LossGradcheckTests, Tacotron2LossShapeTests, Tacotron2LossTorchscriptTests
Tacotron2LossGradcheckTests,
Tacotron2LossShapeTests,
Tacotron2LossTorchscriptTests,
)
class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase): class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase):
......
import torch import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .tacotron2_loss_impl import ( from .tacotron2_loss_impl import Tacotron2LossGradcheckTests, Tacotron2LossShapeTests, Tacotron2LossTorchscriptTests
Tacotron2LossGradcheckTests,
Tacotron2LossShapeTests,
Tacotron2LossTorchscriptTests,
)
@skipIfNoCuda @skipIfNoCuda
......
...@@ -6,12 +6,7 @@ import torchaudio.functional as F ...@@ -6,12 +6,7 @@ import torchaudio.functional as F
from parameterized import parameterized from parameterized import parameterized
from torch import Tensor from torch import Tensor
from torch.autograd import gradcheck, gradgradcheck from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, rnnt_utils, TestBaseMixin
get_spectrogram,
get_whitenoise,
rnnt_utils,
TestBaseMixin,
)
class Autograd(TestBaseMixin): class Autograd(TestBaseMixin):
......
...@@ -3,11 +3,7 @@ import unittest ...@@ -3,11 +3,7 @@ import unittest
import torch import torch
import torchaudio.functional as F import torchaudio.functional as F
from parameterized import parameterized from parameterized import parameterized
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoSox, TorchaudioTestCase
PytorchTestCase,
skipIfNoSox,
TorchaudioTestCase,
)
from .functional_impl import Functional, FunctionalCPUOnly from .functional_impl import Functional, FunctionalCPUOnly
......
...@@ -13,12 +13,7 @@ if LIBROSA_AVAILABLE: ...@@ -13,12 +13,7 @@ if LIBROSA_AVAILABLE:
import numpy as np import numpy as np
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
get_spectrogram,
get_whitenoise,
nested_params,
TestBaseMixin,
)
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available") @unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
......
...@@ -5,12 +5,7 @@ import torch ...@@ -5,12 +5,7 @@ import torch
import torchaudio.functional as F import torchaudio.functional as F
from parameterized import parameterized from parameterized import parameterized
from torchaudio_unittest import common_utils from torchaudio_unittest import common_utils
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import skipIfRocm, TempDirMixin, TestBaseMixin, torch_script
skipIfRocm,
TempDirMixin,
TestBaseMixin,
torch_script,
)
class Functional(TempDirMixin, TestBaseMixin): class Functional(TempDirMixin, TestBaseMixin):
......
...@@ -2,12 +2,7 @@ import itertools ...@@ -2,12 +2,7 @@ import itertools
import torch import torch
from parameterized import parameterized from parameterized import parameterized
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_asset_path, skipIfNoCtcDecoder, TempDirMixin, TorchaudioTestCase
get_asset_path,
skipIfNoCtcDecoder,
TempDirMixin,
TorchaudioTestCase,
)
NUM_TOKENS = 8 NUM_TOKENS = 8
......
import torch import torch
from torchaudio_unittest.common_utils import PytorchTestCase from torchaudio_unittest.common_utils import PytorchTestCase
from torchaudio_unittest.models.rnnt_decoder.rnnt_decoder_test_impl import ( from torchaudio_unittest.models.rnnt_decoder.rnnt_decoder_test_impl import RNNTBeamSearchTestImpl
RNNTBeamSearchTestImpl,
)
class RNNTBeamSearchFloat32CPUTest(RNNTBeamSearchTestImpl, PytorchTestCase): class RNNTBeamSearchFloat32CPUTest(RNNTBeamSearchTestImpl, PytorchTestCase):
......
import torch import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from torchaudio_unittest.models.rnnt_decoder.rnnt_decoder_test_impl import ( from torchaudio_unittest.models.rnnt_decoder.rnnt_decoder_test_impl import RNNTBeamSearchTestImpl
RNNTBeamSearchTestImpl,
)
@skipIfNoCuda @skipIfNoCuda
......
import torch import torch
from torchaudio_unittest.common_utils import PytorchTestCase from torchaudio_unittest.common_utils import PytorchTestCase
from .model_test_impl import ( from .model_test_impl import Tacotron2DecoderTests, Tacotron2EncoderTests, Tacotron2Tests
Tacotron2DecoderTests,
Tacotron2EncoderTests,
Tacotron2Tests,
)
class TestTacotron2EncoderFloat32CPU(Tacotron2EncoderTests, PytorchTestCase): class TestTacotron2EncoderFloat32CPU(Tacotron2EncoderTests, PytorchTestCase):
......
import torch import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .model_test_impl import ( from .model_test_impl import Tacotron2DecoderTests, Tacotron2EncoderTests, Tacotron2Tests
Tacotron2DecoderTests,
Tacotron2EncoderTests,
Tacotron2Tests,
)
@skipIfNoCuda @skipIfNoCuda
......
...@@ -11,11 +11,7 @@ from torchaudio.models.wav2vec2 import ( ...@@ -11,11 +11,7 @@ from torchaudio.models.wav2vec2 import (
wav2vec2_large_lv60k, wav2vec2_large_lv60k,
) )
from torchaudio.models.wav2vec2.utils import import_fairseq_model from torchaudio.models.wav2vec2.utils import import_fairseq_model
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_asset_path, skipIfNoModule, TorchaudioTestCase
get_asset_path,
skipIfNoModule,
TorchaudioTestCase,
)
def _load_config(*paths): def _load_config(*paths):
...@@ -102,10 +98,7 @@ class TestFairseqIntegration(TorchaudioTestCase): ...@@ -102,10 +98,7 @@ class TestFairseqIntegration(TorchaudioTestCase):
from fairseq.models.hubert.hubert import HubertConfig, HubertModel from fairseq.models.hubert.hubert import HubertConfig, HubertModel
from fairseq.models.hubert.hubert_asr import HubertCtcConfig, HubertEncoder from fairseq.models.hubert.hubert_asr import HubertCtcConfig, HubertEncoder
from fairseq.models.wav2vec.wav2vec2 import Wav2Vec2Config, Wav2Vec2Model from fairseq.models.wav2vec.wav2vec2 import Wav2Vec2Config, Wav2Vec2Model
from fairseq.models.wav2vec.wav2vec2_asr import ( from fairseq.models.wav2vec.wav2vec2_asr import Wav2Vec2CtcConfig, Wav2VecEncoder
Wav2Vec2CtcConfig,
Wav2VecEncoder,
)
from fairseq.tasks.hubert_pretraining import HubertPretrainingConfig from fairseq.tasks.hubert_pretraining import HubertPretrainingConfig
from omegaconf import OmegaConf from omegaconf import OmegaConf
......
...@@ -2,17 +2,9 @@ import json ...@@ -2,17 +2,9 @@ import json
import torch import torch
from parameterized import parameterized from parameterized import parameterized
from torchaudio.models.wav2vec2 import ( from torchaudio.models.wav2vec2 import wav2vec2_base, wav2vec2_large, wav2vec2_large_lv60k
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
)
from torchaudio.models.wav2vec2.utils import import_huggingface_model from torchaudio.models.wav2vec2.utils import import_huggingface_model
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_asset_path, skipIfNoModule, TorchaudioTestCase
get_asset_path,
skipIfNoModule,
TorchaudioTestCase,
)
def _load_config(*paths): def _load_config(*paths):
...@@ -76,11 +68,7 @@ class TestHFIntegration(TorchaudioTestCase): ...@@ -76,11 +68,7 @@ class TestHFIntegration(TorchaudioTestCase):
# However, somehow, once "transformers" is imported, `is_module_available` # However, somehow, once "transformers" is imported, `is_module_available`
# starts to fail. Therefore, we defer importing "transformers" until # starts to fail. Therefore, we defer importing "transformers" until
# the actual tests are started. # the actual tests are started.
from transformers.models.wav2vec2 import ( from transformers.models.wav2vec2 import Wav2Vec2Config, Wav2Vec2ForCTC, Wav2Vec2Model
Wav2Vec2Config,
Wav2Vec2ForCTC,
Wav2Vec2Model,
)
if config["architectures"] == ["Wav2Vec2Model"]: if config["architectures"] == ["Wav2Vec2Model"]:
return Wav2Vec2Model(Wav2Vec2Config(**config)) return Wav2Vec2Model(Wav2Vec2Config(**config))
......
...@@ -12,12 +12,7 @@ from torchaudio.models.wav2vec2 import ( ...@@ -12,12 +12,7 @@ from torchaudio.models.wav2vec2 import (
wav2vec2_large, wav2vec2_large,
wav2vec2_large_lv60k, wav2vec2_large_lv60k,
) )
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import skipIfNoCuda, skipIfNoQengine, torch_script, TorchaudioTestCase
skipIfNoCuda,
skipIfNoQengine,
torch_script,
TorchaudioTestCase,
)
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2]) TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION >= (1, 10): if TORCH_VERSION >= (1, 10):
......
...@@ -8,13 +8,7 @@ from unittest import skipIf ...@@ -8,13 +8,7 @@ from unittest import skipIf
import numpy as np import numpy as np
import torch import torch
import torchaudio import torchaudio
from torchaudio_unittest.common_utils import ( from torchaudio_unittest.common_utils import get_whitenoise, PytorchTestCase, save_wav, skipIfNoSox, TempDirMixin
get_whitenoise,
PytorchTestCase,
save_wav,
skipIfNoSox,
TempDirMixin,
)
class RandomPerturbationFile(torch.utils.data.Dataset): class RandomPerturbationFile(torch.utils.data.Dataset):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment