From ffeba11aff8bc9f1c3f7b214da74e44f97f456ef Mon Sep 17 00:00:00 2001 From: mayp777 Date: Mon, 2 Sep 2024 20:24:59 +0800 Subject: [PATCH] UPDATE --- .circleci/build_docs/commit_docs.sh | 35 - .circleci/build_docs/install_wheels.sh | 15 - .circleci/config.yml | 4096 ----------------- .circleci/config.yml.in | 924 ---- .circleci/regenerate.py | 278 -- .circleci/smoke_test/docker/Dockerfile | 36 - .circleci/smoke_test/docker/build_and_push.sh | 8 - .circleci/unittest/linux/README.md | 6 - .circleci/unittest/linux/docker/.dockerignore | 2 - .circleci/unittest/linux/docker/.gitignore | 2 - .circleci/unittest/linux/docker/Dockerfile | 56 - .../unittest/linux/docker/build_and_push.sh | 26 - .../docker/scripts/copy_kaldi_executables.sh | 58 - .circleci/unittest/linux/scripts/install.sh | 81 - .../linux/scripts/run_clang_format.py | 310 -- .../linux/scripts/run_style_checks.sh | 49 - .circleci/unittest/linux/scripts/run_test.sh | 22 - .circleci/unittest/linux/scripts/setup_env.sh | 39 - .circleci/unittest/windows/README.md | 4 - .../unittest/windows/scripts/environment.yml | 16 - .circleci/unittest/windows/scripts/install.sh | 99 - .../windows/scripts/install_conda.bat | 1 - .../unittest/windows/scripts/run_test.sh | 16 - .../unittest/windows/scripts/set_cuda_envs.sh | 44 - .../unittest/windows/scripts/setup_env.sh | 38 - .flake8 | 12 +- .gitignore | 1 + .gitmodules | 10 - .pre-commit-config.yaml | 29 +- CITATION | 12 +- CMakeLists.txt | 80 +- CONTRIBUTING.md | 7 +- README.md | 161 +- README_ORIGIN.md | 125 +- cmake/TorchAudioHelper.cmake | 33 +- docs/Makefile | 8 + docs/post_process_dispatcher.py | 16 + docs/requirements-tutorials.txt | 2 +- docs/requirements.txt | 7 +- docs/source/Doxyfile | 2727 +++++++++++ docs/source/_static/css/custom.css | 40 + .../_templates/autosummary/bundle_class.rst | 54 +- .../autosummary/cuda_ctc_decoder_class.rst | 51 + docs/source/_templates/autosummary/io.rst | 19 + .../_templates/autosummary/io_class.rst | 49 +- 
.../_templates/autosummary/model_class.rst | 118 +- docs/source/_templates/breadcrumbs.html | 97 + docs/source/_templates/layout.html | 29 +- docs/source/build.ffmpeg.rst | 473 ++ docs/source/build.jetson.rst | 159 + docs/source/build.linux.rst | 60 + docs/source/build.rst | 40 + docs/source/build.windows.rst | 245 + docs/source/conf.py | 41 +- docs/source/functional.rst | 18 +- docs/source/index.rst | 95 +- docs/source/installation.rst | 177 + docs/source/io.rst | 2 + docs/source/models.decoder.rst | 16 + docs/source/models.rst | 45 +- docs/source/pipelines.rst | 116 +- docs/source/refs.bib | 203 +- docs/source/sox_effects.rst | 2 - docs/source/torchaudio.rst | 117 +- docs/source/transforms.rst | 11 +- examples/asr/emformer_rnnt/README.md | 4 +- examples/asr/emformer_rnnt/global_stats.py | 2 +- examples/asr/emformer_rnnt/pipeline_demo.py | 18 +- .../emformer_rnnt/tedlium3/eval_pipeline.py | 90 + .../emformer_rnnt/tedlium3/global_stats.json | 166 + .../asr/emformer_rnnt/tedlium3/lightning.py | 233 + .../asr/emformer_rnnt/tedlium3/train_spm.py | 89 + .../asr/librispeech_conformer_rnnt/README.md | 49 + .../librispeech_conformer_rnnt/data_module.py | 194 + .../asr/librispeech_conformer_rnnt/eval.py | 79 + .../global_stats.json | 166 + .../librispeech_conformer_rnnt/lightning.py | 159 + .../asr/librispeech_conformer_rnnt/train.py | 111 + .../librispeech_conformer_rnnt/train_spm.py | 80 + .../librispeech_conformer_rnnt/transforms.py | 119 + .../README.md | 76 + .../blists/README.md | 7 + .../data_module.py | 204 + .../error_analysis/get_error_word_count.py | 112 + .../eval.py | 124 + .../global_stats_100.json | 166 + .../lightning.py | 204 + .../score.sh | 9 + .../train.py | 154 + .../train_spm.py | 89 + .../transforms.py | 198 + .../asr/librispeech_ctc_decoder/README.md | 17 +- .../librispeech_cuda_ctc_decoder/README.md | 36 + .../librispeech_cuda_ctc_decoder/inference.py | 196 + examples/avsr/README.md | 73 + examples/avsr/average_checkpoints.py | 28 + 
examples/avsr/data_module.py | 134 + examples/avsr/data_prep/README.md | 72 + examples/avsr/data_prep/data/data_module.py | 52 + .../data_prep/detectors/mediapipe/detector.py | 50 + .../detectors/mediapipe/video_process.py | 158 + .../detectors/retinaface/detector.py | 29 + .../detectors/retinaface/video_process.py | 158 + examples/avsr/data_prep/merge.py | 80 + examples/avsr/data_prep/preprocess_lrs3.py | 211 + examples/avsr/data_prep/requirements.txt | 4 + examples/avsr/data_prep/tools/README.md | 28 + examples/avsr/data_prep/utils.py | 87 + examples/avsr/eval.py | 100 + examples/avsr/lightning.py | 148 + examples/avsr/lightning_av.py | 153 + examples/avsr/lrs3.py | 76 + examples/avsr/models/conformer_rnnt.py | 25 + examples/avsr/models/emformer_rnnt.py | 28 + examples/avsr/models/fusion.py | 36 + examples/avsr/models/resnet.py | 237 + examples/avsr/models/resnet1d.py | 233 + examples/avsr/schedulers.py | 28 + examples/avsr/train.py | 147 + examples/avsr/train_spm.py | 80 + examples/avsr/transforms.py | 173 + examples/dnn_beamformer/README.md | 63 + examples/dnn_beamformer/datamodule.py | 101 + examples/dnn_beamformer/eval.py | 90 + examples/dnn_beamformer/model.py | 72 + examples/dnn_beamformer/train.py | 86 + examples/dnn_beamformer/utils.py | 58 + examples/hubert/dataset/hubert_dataset.py | 24 +- examples/hubert/finetune.py | 13 +- examples/hubert/lightning_modules.py | 540 +++ examples/hubert/train.py | 14 +- .../augmentation/create_jittable_pipeline.py | 2 +- .../text/text_preprocessing.py | 4 +- examples/pipeline_wav2letter/main.py | 8 +- examples/self_supervised_learning/README.md | 27 + .../data_modules/__init__.py | 6 + .../data_modules/_hubert_datamodule.py | 65 + .../data_modules/_utils.py | 488 ++ .../data_modules/_wav2vec2_datamodule.py | 75 + .../lightning_modules.py | 61 + .../losses/__init__.py | 6 + .../losses/_hubert_loss.py | 47 + .../losses/_wav2vec2_loss.py | 80 + .../lr_schedulers/__init__.py | 5 + .../lr_schedulers/_linear_decay.py | 27 + 
.../self_supervised_learning/train_hubert.py | 316 ++ examples/source_separation/eval.py | 4 +- examples/source_separation/lightning_train.py | 5 +- ...asr_inference_with_ctc_decoder_tutorial.py | 93 +- ...nference_with_cuda_ctc_decoder_tutorial.py | 311 ++ .../audio_data_augmentation_tutorial.py | 288 +- examples/tutorials/audio_datasets_tutorial.py | 72 +- .../audio_feature_augmentation_tutorial.py | 113 +- .../audio_feature_extractions_tutorial.py | 273 +- examples/tutorials/audio_io_tutorial.py | 47 +- .../tutorials/audio_resampling_tutorial.py | 93 +- .../ctc_forced_alignment_api_tutorial.py | 517 +++ examples/tutorials/device_asr.py | 33 +- examples/tutorials/effector_tutorial.py | 366 ++ ...lignment_for_multilingual_data_tutorial.py | 600 +++ .../tutorials/forced_alignment_tutorial.py | 234 +- examples/tutorials/hybrid_demucs_tutorial.py | 77 +- examples/tutorials/mvdr_tutorial.py | 18 +- examples/tutorials/nvdec_tutorial.py | 791 ++++ examples/tutorials/nvenc_tutorial.py | 388 ++ examples/tutorials/online_asr_tutorial.py | 92 +- .../speech_recognition_pipeline_tutorial.py | 5 +- examples/tutorials/squim_tutorial.py | 390 ++ .../streamreader_advanced_tutorial.py | 31 +- .../tutorials/streamreader_basic_tutorial.py | 33 +- examples/tutorials/streamwriter_advanced.py | 38 +- .../tutorials/streamwriter_basic_tutorial.py | 99 +- .../tutorials/tacotron2_pipeline_tutorial.py | 66 +- packaging/torchaudio/meta.yaml | 7 +- packaging/vs2019/conda_build_config.yaml | 4 +- packaging/windows/internal/cuda_install.bat | 26 +- pyproject.toml | 5 +- setup.py | 57 +- test/integration_tests/conftest.py | 2 +- .../prototype/hifi_gan_pipeline_test.py | 58 + .../prototype/vggish_pipeline_test.py | 16 + test/integration_tests/rnnt_pipeline_test.py | 3 + test/integration_tests/squim_pipeline_test.py | 43 + .../wav2vec2_pipeline_test.py | 25 +- test/smoke_test/smoke_test.py | 72 +- test/smoke_test/smoke_test_no_ffmpeg.py | 3 + test/tools/make_test_env.sh | 138 + 
test/tools/rocky8_make_test_env.sh | 136 + .../RATRACE_wave_f_nm_np1_fr_goo_37.avi | Bin 0 -> 263680 bytes test/torchaudio_unittest/assets/README.md | 5 + .../torchaudio_unittest/assets/nasa_13013.avi | Bin 0 -> 637486 bytes test/torchaudio_unittest/assets/testsrc.hevc | Bin 0 -> 28672 bytes .../fairseq/generate_hubert_model_config.py | 5 +- .../assets/wav2vec2/fairseq/xlsr_1b.json | 51 + .../assets/wav2vec2/fairseq/xlsr_2b.json | 51 + .../assets/wav2vec2/fairseq/xlsr_300m.json | 51 + .../huggingface/wav2vec2-xls-r-1b.json | 76 + .../huggingface/wav2vec2-xls-r-2b.json | 75 + .../huggingface/wav2vec2-xls-r-300m.json | 76 + .../wav2vec2/huggingface/wavlm-base.json | 98 + .../wav2vec2/huggingface/wavlm-large.json | 98 + .../backend/dispatcher/__init__.py | 0 .../backend/dispatcher/dispatcher_test.py | 129 + .../backend/dispatcher/ffmpeg/__init__.py | 0 .../backend/dispatcher/ffmpeg/info_test.py | 611 +++ .../backend/dispatcher/ffmpeg/load_test.py | 617 +++ .../backend/dispatcher/ffmpeg/save_test.py | 455 ++ .../backend/dispatcher/smoke_test.py | 56 + .../backend/dispatcher/soundfile/__init__.py | 0 .../backend/dispatcher/soundfile/common.py | 56 + .../backend/dispatcher/soundfile/info_test.py | 191 + .../backend/dispatcher/soundfile/load_test.py | 369 ++ .../backend/dispatcher/soundfile/save_test.py | 319 ++ .../backend/dispatcher/sox/__init__.py | 0 .../backend/dispatcher/sox/common.py | 14 + .../backend/dispatcher/sox/info_test.py | 398 ++ .../backend/dispatcher/sox/load_test.py | 369 ++ .../backend/dispatcher/sox/roundtrip_test.py | 59 + .../backend/dispatcher/sox/save_test.py | 416 ++ .../backend/dispatcher/sox/smoke_test.py | 80 + .../backend/soundfile/info_test.py | 1 - .../backend/sox_io/info_test.py | 285 +- .../backend/sox_io/load_test.py | 300 +- .../backend/sox_io/save_test.py | 101 +- .../backend/sox_io/smoke_test.py | 86 - .../backend/sox_io/torchscript_test.py | 10 +- .../torchaudio_unittest/backend/utils_test.py | 6 +- .../common_utils/__init__.py | 26 
+- .../common_utils/autograd_utils.py | 20 + .../common_utils/case_utils.py | 131 +- .../common_utils/rnnt_utils.py | 11 +- .../emformer_rnnt/test_mustc_lightning.py | 76 + .../emformer_rnnt/test_tedlium3_lightning.py | 79 + .../functional/autograd_cuda_test.py | 1 + .../functional/autograd_impl.py | 66 +- .../functional/batch_consistency_test.py | 109 +- .../functional/functional_cuda_test.py | 18 +- .../functional/functional_impl.py | 485 +- .../kaldi_compatibility_cpu_test.py | 7 +- .../kaldi_compatibility_test_impl.py | 32 +- .../librosa_compatibility_cuda_test.py | 4 +- .../librosa_compatibility_test_impl.py | 4 +- .../torchscript_consistency_cuda_test.py | 3 +- .../torchscript_consistency_impl.py | 66 +- test/torchaudio_unittest/io/common.py | 16 + test/torchaudio_unittest/io/effector_test.py | 102 + test/torchaudio_unittest/io/playback_test.py | 65 + .../io/stream_reader_test.py | 678 ++- .../io/stream_writer_test.py | 491 +- .../models/decoder/ctc_decoder_test.py | 16 + .../models/decoder/cuda_ctc_decoder_test.py | 49 + .../rnnt_decoder/rnnt_decoder_test_impl.py | 4 +- .../models/squim/__init__.py | 0 .../models/squim/squim_test.py | 113 + .../models/tacotron2/model_test_impl.py | 6 +- .../wav2vec2/fairseq_integration_test.py | 50 +- .../wav2vec2/huggingface_intergration_test.py | 153 +- .../models/wav2vec2/model_test.py | 135 + .../sox_effect/dataset_test.py | 8 +- .../sox_effect/smoke_test.py | 21 - .../sox_effect/sox_effect_test.py | 143 - .../transforms/autograd_cuda_test.py | 3 +- .../transforms/autograd_test_impl.py | 68 +- .../transforms/batch_consistency_test.py | 125 +- .../librosa_compatibility_cuda_test.py | 3 +- .../librosa_compatibility_test_impl.py | 4 +- .../torchscript_consistency_cuda_test.py | 3 +- .../torchscript_consistency_impl.py | 92 +- .../transforms/transforms_cuda_test.py | 3 +- .../transforms/transforms_test.py | 8 +- .../transforms/transforms_test_impl.py | 328 +- .../utils/ffmpeg_utils_test.py | 14 + 
third_party/LICENSES_BUNDLED.txt | 11 + third_party/ffmpeg/multi/CMakeLists.txt | 208 + third_party/ffmpeg/single/CMakeLists.txt | 40 + third_party/sox/CMakeLists.txt | 227 +- third_party/sox/stub.c | 85 + tools/release_notes/classify_prs.py | 6 +- tools/setup_helpers/extension.py | 67 +- torchaudio/__init__.py | 24 +- torchaudio/_backend/__init__.py | 57 + torchaudio/_backend/backend.py | 53 + torchaudio/_backend/common.py | 52 + torchaudio/_backend/ffmpeg.py | 373 ++ torchaudio/_backend/soundfile.py | 54 + torchaudio/_backend/soundfile_backend.py | 457 ++ torchaudio/_backend/sox.py | 91 + torchaudio/_backend/utils.py | 316 ++ torchaudio/_extension/__init__.py | 120 + torchaudio/_extension/utils.py | 235 + torchaudio/_internal/__init__.py | 5 +- torchaudio/_internal/module_utils.py | 120 +- torchaudio/backend/__init__.py | 12 +- torchaudio/backend/_no_backend.py | 24 + torchaudio/backend/_sox_io_backend.py | 297 ++ torchaudio/backend/common.py | 64 +- torchaudio/backend/no_backend.py | 32 +- torchaudio/backend/soundfile_backend.py | 443 +- torchaudio/backend/sox_io_backend.py | 435 +- torchaudio/backend/utils.py | 14 +- torchaudio/csrc/CMakeLists.txt | 194 +- torchaudio/csrc/cuctc/CMakeLists.txt | 43 + torchaudio/csrc/cuctc/LICENSE | 25 + .../csrc/cuctc/include/ctc_prefix_decoder.h | 64 + .../cuctc/include/ctc_prefix_decoder_host.h | 159 + .../csrc/cuctc/src/bitonic_topk/LICENSE | 201 + .../cuctc/src/bitonic_topk/bitonic_sort.cuh | 316 ++ .../cuctc/src/bitonic_topk/pow2_utils.cuh | 163 + .../cuctc/src/bitonic_topk/warpsort_topk.cuh | 517 +++ torchaudio/csrc/cuctc/src/ctc_fast_divmod.cuh | 167 + .../csrc/cuctc/src/ctc_prefix_decoder.cpp | 379 ++ .../cuctc/src/ctc_prefix_decoder_kernel_v2.cu | 999 ++++ torchaudio/csrc/cuctc/src/device_data_wrap.h | 89 + torchaudio/csrc/cuctc/src/device_log_prob.cuh | 77 + torchaudio/csrc/cuctc/src/python_binding.cpp | 105 + torchaudio/csrc/ffmpeg/CMakeLists.txt | 94 + torchaudio/csrc/ffmpeg/compat.cpp | 62 + 
torchaudio/csrc/ffmpeg/ffmpeg.cpp | 73 +- torchaudio/csrc/ffmpeg/ffmpeg.h | 45 +- torchaudio/csrc/ffmpeg/filter_graph.cpp | 129 +- torchaudio/csrc/ffmpeg/filter_graph.h | 45 +- torchaudio/csrc/ffmpeg/hw_context.cpp | 40 + torchaudio/csrc/ffmpeg/hw_context.h | 11 + torchaudio/csrc/ffmpeg/pybind/pybind.cpp | 334 +- .../stream_reader/buffer/chunked_buffer.cpp | 129 + .../stream_reader/buffer/chunked_buffer.h | 33 + .../stream_reader/buffer/unchunked_buffer.cpp | 33 + .../stream_reader/buffer/unchunked_buffer.h | 23 + .../csrc/ffmpeg/stream_reader/conversion.cpp | 628 +++ .../csrc/ffmpeg/stream_reader/conversion.h | 129 + .../ffmpeg/stream_reader/packet_buffer.cpp | 20 + .../csrc/ffmpeg/stream_reader/packet_buffer.h | 16 + .../ffmpeg/stream_reader/post_process.cpp | 620 +++ .../csrc/ffmpeg/stream_reader/post_process.h | 34 + .../ffmpeg/stream_reader/stream_processor.cpp | 361 +- .../ffmpeg/stream_reader/stream_processor.h | 55 +- .../ffmpeg/stream_reader/stream_reader.cpp | 395 +- .../csrc/ffmpeg/stream_reader/stream_reader.h | 345 +- .../csrc/ffmpeg/stream_reader/typedefs.h | 144 +- .../ffmpeg/stream_writer/encode_process.cpp | 974 ++++ .../ffmpeg/stream_writer/encode_process.h | 67 + .../csrc/ffmpeg/stream_writer/encoder.cpp | 62 + .../csrc/ffmpeg/stream_writer/encoder.h | 30 + .../ffmpeg/stream_writer/packet_writer.cpp | 36 + .../csrc/ffmpeg/stream_writer/packet_writer.h | 16 + .../ffmpeg/stream_writer/stream_writer.cpp | 1240 ++--- .../csrc/ffmpeg/stream_writer/stream_writer.h | 355 +- .../ffmpeg/stream_writer/tensor_converter.cpp | 497 ++ .../ffmpeg/stream_writer/tensor_converter.h | 95 + torchaudio/csrc/ffmpeg/stream_writer/types.h | 19 + torchaudio/csrc/forced_align/compute.cpp | 19 + torchaudio/csrc/forced_align/compute.h | 10 + torchaudio/csrc/forced_align/cpu/compute.cpp | 205 + torchaudio/csrc/forced_align/gpu/compute.cu | 324 ++ torchaudio/csrc/iir_cuda.cu | 83 + torchaudio/csrc/iir_cuda.h | 8 + torchaudio/csrc/lfilter.cpp | 15 +- 
torchaudio/csrc/pybind/pybind.cpp | 15 + torchaudio/csrc/rir.cpp | 207 + torchaudio/csrc/rnnt/autograd.cpp | 24 +- torchaudio/csrc/rnnt/compute.cpp | 15 +- torchaudio/csrc/rnnt/compute.h | 3 +- torchaudio/csrc/rnnt/cpu/compute.cpp | 4 +- torchaudio/csrc/rnnt/cpu/cpu_kernels.h | 91 +- torchaudio/csrc/rnnt/dcu/compute.cpp | 6 +- torchaudio/csrc/rnnt/dcu/compute_alphas.cpp | 2 +- torchaudio/csrc/rnnt/dcu/compute_betas.cpp | 2 +- torchaudio/csrc/rnnt/dcu/gpu_kernels.cuh | 19 +- torchaudio/csrc/rnnt/dcu/gpu_transducer.h | 8 +- torchaudio/csrc/rnnt/dcu/kernels.h | 53 +- torchaudio/csrc/rnnt/gpu/compute.cu | 6 +- torchaudio/csrc/rnnt/gpu/compute_alphas.cu | 2 +- torchaudio/csrc/rnnt/gpu/compute_betas.cu | 2 +- torchaudio/csrc/rnnt/gpu/gpu_kernels.cuh | 19 +- torchaudio/csrc/rnnt/gpu/gpu_transducer.h | 8 +- torchaudio/csrc/rnnt/gpu/kernels.h | 53 +- torchaudio/csrc/rnnt/options.h | 9 +- torchaudio/csrc/sox/CMakeLists.txt | 25 + torchaudio/csrc/sox/effects.cpp | 37 +- torchaudio/csrc/sox/effects.h | 8 +- torchaudio/csrc/sox/effects_chain.cpp | 7 +- torchaudio/csrc/sox/effects_chain.h | 6 +- torchaudio/csrc/sox/io.cpp | 33 +- torchaudio/csrc/sox/io.h | 13 +- torchaudio/csrc/sox/pybind/pybind.cpp | 42 +- torchaudio/csrc/sox/types.cpp | 10 +- torchaudio/csrc/sox/types.h | 10 +- torchaudio/csrc/sox/utils.cpp | 57 +- torchaudio/csrc/sox/utils.h | 26 +- torchaudio/csrc/utils.cpp | 34 +- torchaudio/csrc/utils.h | 9 + torchaudio/datasets/__init__.py | 2 + torchaudio/datasets/cmuarctic.py | 6 +- torchaudio/datasets/cmudict.py | 123 +- torchaudio/datasets/dr_vctk.py | 6 +- torchaudio/datasets/gtzan.py | 6 +- torchaudio/datasets/librilight_limited.py | 6 +- torchaudio/datasets/librimix.py | 9 +- torchaudio/datasets/librispeech.py | 6 +- torchaudio/datasets/librispeech_biasing.py | 189 + torchaudio/datasets/libritts.py | 6 +- torchaudio/datasets/ljspeech.py | 6 +- torchaudio/datasets/musdb_hq.py | 6 +- torchaudio/datasets/quesst14.py | 6 +- torchaudio/datasets/speechcommands.py | 6 +- 
torchaudio/datasets/tedlium.py | 6 +- torchaudio/datasets/utils.py | 203 +- torchaudio/datasets/vctk.py | 6 +- torchaudio/datasets/voxceleb1.py | 6 +- torchaudio/datasets/yesno.py | 6 +- torchaudio/functional/__init__.py | 20 +- torchaudio/functional/_alignment.py | 128 + torchaudio/functional/filtering.py | 181 +- torchaudio/functional/functional.py | 568 ++- torchaudio/io/__init__.py | 47 +- torchaudio/io/_effector.py | 347 ++ torchaudio/io/_playback.py | 71 + torchaudio/io/_stream_reader.py | 447 +- torchaudio/io/_stream_writer.py | 241 +- torchaudio/models/__init__.py | 26 + torchaudio/models/_hdemucs.py | 3 - torchaudio/models/conv_tasnet.py | 1 - torchaudio/models/decoder/__init__.py | 29 +- torchaudio/models/decoder/_ctc_decoder.py | 194 +- .../models/decoder/_cuda_ctc_decoder.py | 187 + torchaudio/models/rnnt.py | 3 +- torchaudio/models/rnnt_decoder.py | 24 +- torchaudio/models/squim/__init__.py | 11 + torchaudio/models/squim/objective.py | 326 ++ torchaudio/models/squim/subjective.py | 150 + torchaudio/models/wav2vec2/__init__.py | 12 + torchaudio/models/wav2vec2/components.py | 175 +- torchaudio/models/wav2vec2/model.py | 377 +- .../wav2vec2/utils/import_huggingface.py | 73 +- torchaudio/models/wav2vec2/wavlm_attention.py | 214 + torchaudio/pipelines/__init__.py | 21 + torchaudio/pipelines/_squim_pipeline.py | 176 + torchaudio/pipelines/_wav2vec2/aligner.py | 87 + torchaudio/pipelines/_wav2vec2/impl.py | 594 ++- torchaudio/pipelines/_wav2vec2/utils.py | 120 + torchaudio/sox_effects/__init__.py | 8 - torchaudio/sox_effects/sox_effects.py | 43 +- torchaudio/transforms/__init__.py | 16 + torchaudio/transforms/_transforms.py | 498 +- torchaudio/utils/__init__.py | 4 - torchaudio/utils/download.py | 4 +- torchaudio/utils/ffmpeg_utils.py | 209 +- torchaudio/utils/sox_utils.py | 35 +- version.txt | 2 +- 449 files changed, 44525 insertions(+), 12745 deletions(-) delete mode 100755 .circleci/build_docs/commit_docs.sh delete mode 100755 
.circleci/build_docs/install_wheels.sh delete mode 100644 .circleci/config.yml delete mode 100644 .circleci/config.yml.in delete mode 100755 .circleci/regenerate.py delete mode 100644 .circleci/smoke_test/docker/Dockerfile delete mode 100755 .circleci/smoke_test/docker/build_and_push.sh delete mode 100644 .circleci/unittest/linux/README.md delete mode 100644 .circleci/unittest/linux/docker/.dockerignore delete mode 100644 .circleci/unittest/linux/docker/.gitignore delete mode 100644 .circleci/unittest/linux/docker/Dockerfile delete mode 100755 .circleci/unittest/linux/docker/build_and_push.sh delete mode 100755 .circleci/unittest/linux/docker/scripts/copy_kaldi_executables.sh delete mode 100755 .circleci/unittest/linux/scripts/install.sh delete mode 100755 .circleci/unittest/linux/scripts/run_clang_format.py delete mode 100755 .circleci/unittest/linux/scripts/run_style_checks.sh delete mode 100755 .circleci/unittest/linux/scripts/run_test.sh delete mode 100755 .circleci/unittest/linux/scripts/setup_env.sh delete mode 100644 .circleci/unittest/windows/README.md delete mode 100644 .circleci/unittest/windows/scripts/environment.yml delete mode 100644 .circleci/unittest/windows/scripts/install.sh delete mode 100644 .circleci/unittest/windows/scripts/install_conda.bat delete mode 100644 .circleci/unittest/windows/scripts/run_test.sh delete mode 100644 .circleci/unittest/windows/scripts/set_cuda_envs.sh delete mode 100644 .circleci/unittest/windows/scripts/setup_env.sh create mode 100644 docs/post_process_dispatcher.py create mode 100644 docs/source/Doxyfile create mode 100644 docs/source/_templates/autosummary/cuda_ctc_decoder_class.rst create mode 100644 docs/source/_templates/autosummary/io.rst create mode 100644 docs/source/_templates/breadcrumbs.html create mode 100644 docs/source/build.ffmpeg.rst create mode 100644 docs/source/build.jetson.rst create mode 100644 docs/source/build.linux.rst create mode 100644 docs/source/build.rst create mode 100644 
docs/source/build.windows.rst create mode 100644 docs/source/installation.rst create mode 100644 examples/asr/emformer_rnnt/tedlium3/eval_pipeline.py create mode 100644 examples/asr/emformer_rnnt/tedlium3/global_stats.json create mode 100644 examples/asr/emformer_rnnt/tedlium3/lightning.py create mode 100644 examples/asr/emformer_rnnt/tedlium3/train_spm.py create mode 100644 examples/asr/librispeech_conformer_rnnt/README.md create mode 100644 examples/asr/librispeech_conformer_rnnt/data_module.py create mode 100644 examples/asr/librispeech_conformer_rnnt/eval.py create mode 100644 examples/asr/librispeech_conformer_rnnt/global_stats.json create mode 100644 examples/asr/librispeech_conformer_rnnt/lightning.py create mode 100644 examples/asr/librispeech_conformer_rnnt/train.py create mode 100644 examples/asr/librispeech_conformer_rnnt/train_spm.py create mode 100644 examples/asr/librispeech_conformer_rnnt/transforms.py create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/README.md create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/blists/README.md create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/data_module.py create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/error_analysis/get_error_word_count.py create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/eval.py create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/global_stats_100.json create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/lightning.py create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/score.sh create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/train.py create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/train_spm.py create mode 100644 examples/asr/librispeech_conformer_rnnt_biasing/transforms.py create mode 100644 examples/asr/librispeech_cuda_ctc_decoder/README.md create mode 100644 examples/asr/librispeech_cuda_ctc_decoder/inference.py create mode 
100644 examples/avsr/README.md create mode 100644 examples/avsr/average_checkpoints.py create mode 100644 examples/avsr/data_module.py create mode 100644 examples/avsr/data_prep/README.md create mode 100644 examples/avsr/data_prep/data/data_module.py create mode 100644 examples/avsr/data_prep/detectors/mediapipe/detector.py create mode 100644 examples/avsr/data_prep/detectors/mediapipe/video_process.py create mode 100644 examples/avsr/data_prep/detectors/retinaface/detector.py create mode 100644 examples/avsr/data_prep/detectors/retinaface/video_process.py create mode 100644 examples/avsr/data_prep/merge.py create mode 100644 examples/avsr/data_prep/preprocess_lrs3.py create mode 100644 examples/avsr/data_prep/requirements.txt create mode 100644 examples/avsr/data_prep/tools/README.md create mode 100644 examples/avsr/data_prep/utils.py create mode 100644 examples/avsr/eval.py create mode 100644 examples/avsr/lightning.py create mode 100644 examples/avsr/lightning_av.py create mode 100644 examples/avsr/lrs3.py create mode 100644 examples/avsr/models/conformer_rnnt.py create mode 100644 examples/avsr/models/emformer_rnnt.py create mode 100644 examples/avsr/models/fusion.py create mode 100644 examples/avsr/models/resnet.py create mode 100644 examples/avsr/models/resnet1d.py create mode 100644 examples/avsr/schedulers.py create mode 100644 examples/avsr/train.py create mode 100644 examples/avsr/train_spm.py create mode 100644 examples/avsr/transforms.py create mode 100644 examples/dnn_beamformer/README.md create mode 100644 examples/dnn_beamformer/datamodule.py create mode 100644 examples/dnn_beamformer/eval.py create mode 100644 examples/dnn_beamformer/model.py create mode 100644 examples/dnn_beamformer/train.py create mode 100644 examples/dnn_beamformer/utils.py create mode 100644 examples/hubert/lightning_modules.py create mode 100644 examples/self_supervised_learning/README.md create mode 100644 examples/self_supervised_learning/data_modules/__init__.py create mode 
100644 examples/self_supervised_learning/data_modules/_hubert_datamodule.py create mode 100644 examples/self_supervised_learning/data_modules/_utils.py create mode 100644 examples/self_supervised_learning/data_modules/_wav2vec2_datamodule.py create mode 100644 examples/self_supervised_learning/lightning_modules.py create mode 100644 examples/self_supervised_learning/losses/__init__.py create mode 100644 examples/self_supervised_learning/losses/_hubert_loss.py create mode 100644 examples/self_supervised_learning/losses/_wav2vec2_loss.py create mode 100644 examples/self_supervised_learning/lr_schedulers/__init__.py create mode 100644 examples/self_supervised_learning/lr_schedulers/_linear_decay.py create mode 100644 examples/self_supervised_learning/train_hubert.py create mode 100644 examples/tutorials/asr_inference_with_cuda_ctc_decoder_tutorial.py create mode 100644 examples/tutorials/ctc_forced_alignment_api_tutorial.py create mode 100644 examples/tutorials/effector_tutorial.py create mode 100644 examples/tutorials/forced_alignment_for_multilingual_data_tutorial.py create mode 100644 examples/tutorials/nvdec_tutorial.py create mode 100644 examples/tutorials/nvenc_tutorial.py create mode 100644 examples/tutorials/squim_tutorial.py create mode 100644 test/integration_tests/prototype/hifi_gan_pipeline_test.py create mode 100644 test/integration_tests/prototype/vggish_pipeline_test.py create mode 100644 test/integration_tests/squim_pipeline_test.py create mode 100644 test/smoke_test/smoke_test_no_ffmpeg.py create mode 100644 test/tools/make_test_env.sh create mode 100644 test/tools/rocky8_make_test_env.sh create mode 100644 test/torchaudio_unittest/assets/RATRACE_wave_f_nm_np1_fr_goo_37.avi create mode 100644 test/torchaudio_unittest/assets/README.md create mode 100644 test/torchaudio_unittest/assets/nasa_13013.avi create mode 100644 test/torchaudio_unittest/assets/testsrc.hevc create mode 100644 test/torchaudio_unittest/assets/wav2vec2/fairseq/xlsr_1b.json create 
mode 100644 test/torchaudio_unittest/assets/wav2vec2/fairseq/xlsr_2b.json create mode 100644 test/torchaudio_unittest/assets/wav2vec2/fairseq/xlsr_300m.json create mode 100644 test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-xls-r-1b.json create mode 100644 test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-xls-r-2b.json create mode 100644 test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-xls-r-300m.json create mode 100644 test/torchaudio_unittest/assets/wav2vec2/huggingface/wavlm-base.json create mode 100644 test/torchaudio_unittest/assets/wav2vec2/huggingface/wavlm-large.json create mode 100644 test/torchaudio_unittest/backend/dispatcher/__init__.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/dispatcher_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/ffmpeg/__init__.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/ffmpeg/info_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/ffmpeg/load_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/ffmpeg/save_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/smoke_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/soundfile/__init__.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/soundfile/common.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/soundfile/info_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/soundfile/load_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/soundfile/save_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/sox/__init__.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/sox/common.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/sox/info_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/sox/load_test.py create mode 100644 
test/torchaudio_unittest/backend/dispatcher/sox/roundtrip_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/sox/save_test.py create mode 100644 test/torchaudio_unittest/backend/dispatcher/sox/smoke_test.py create mode 100644 test/torchaudio_unittest/common_utils/autograd_utils.py create mode 100644 test/torchaudio_unittest/example/emformer_rnnt/test_mustc_lightning.py create mode 100644 test/torchaudio_unittest/example/emformer_rnnt/test_tedlium3_lightning.py create mode 100644 test/torchaudio_unittest/io/common.py create mode 100644 test/torchaudio_unittest/io/effector_test.py create mode 100644 test/torchaudio_unittest/io/playback_test.py create mode 100644 test/torchaudio_unittest/models/decoder/cuda_ctc_decoder_test.py create mode 100644 test/torchaudio_unittest/models/squim/__init__.py create mode 100644 test/torchaudio_unittest/models/squim/squim_test.py create mode 100644 third_party/LICENSES_BUNDLED.txt create mode 100644 third_party/ffmpeg/multi/CMakeLists.txt create mode 100644 third_party/ffmpeg/single/CMakeLists.txt create mode 100644 third_party/sox/stub.c create mode 100644 torchaudio/_backend/__init__.py create mode 100644 torchaudio/_backend/backend.py create mode 100644 torchaudio/_backend/common.py create mode 100644 torchaudio/_backend/ffmpeg.py create mode 100644 torchaudio/_backend/soundfile.py create mode 100644 torchaudio/_backend/soundfile_backend.py create mode 100644 torchaudio/_backend/sox.py create mode 100644 torchaudio/_backend/utils.py create mode 100644 torchaudio/_extension/__init__.py create mode 100644 torchaudio/_extension/utils.py create mode 100644 torchaudio/backend/_no_backend.py create mode 100644 torchaudio/backend/_sox_io_backend.py create mode 100644 torchaudio/csrc/cuctc/CMakeLists.txt create mode 100644 torchaudio/csrc/cuctc/LICENSE create mode 100644 torchaudio/csrc/cuctc/include/ctc_prefix_decoder.h create mode 100644 torchaudio/csrc/cuctc/include/ctc_prefix_decoder_host.h create mode 100644 
torchaudio/csrc/cuctc/src/bitonic_topk/LICENSE create mode 100644 torchaudio/csrc/cuctc/src/bitonic_topk/bitonic_sort.cuh create mode 100644 torchaudio/csrc/cuctc/src/bitonic_topk/pow2_utils.cuh create mode 100644 torchaudio/csrc/cuctc/src/bitonic_topk/warpsort_topk.cuh create mode 100644 torchaudio/csrc/cuctc/src/ctc_fast_divmod.cuh create mode 100644 torchaudio/csrc/cuctc/src/ctc_prefix_decoder.cpp create mode 100644 torchaudio/csrc/cuctc/src/ctc_prefix_decoder_kernel_v2.cu create mode 100644 torchaudio/csrc/cuctc/src/device_data_wrap.h create mode 100644 torchaudio/csrc/cuctc/src/device_log_prob.cuh create mode 100644 torchaudio/csrc/cuctc/src/python_binding.cpp create mode 100644 torchaudio/csrc/ffmpeg/CMakeLists.txt create mode 100644 torchaudio/csrc/ffmpeg/compat.cpp create mode 100644 torchaudio/csrc/ffmpeg/hw_context.cpp create mode 100644 torchaudio/csrc/ffmpeg/hw_context.h create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/buffer/chunked_buffer.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/buffer/chunked_buffer.h create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/buffer/unchunked_buffer.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/buffer/unchunked_buffer.h create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/conversion.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/conversion.h create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/packet_buffer.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/packet_buffer.h create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/post_process.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_reader/post_process.h create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/encode_process.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/encode_process.h create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/encoder.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/encoder.h create mode 100644 
torchaudio/csrc/ffmpeg/stream_writer/packet_writer.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/packet_writer.h create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/tensor_converter.cpp create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/tensor_converter.h create mode 100644 torchaudio/csrc/ffmpeg/stream_writer/types.h create mode 100644 torchaudio/csrc/forced_align/compute.cpp create mode 100644 torchaudio/csrc/forced_align/compute.h create mode 100644 torchaudio/csrc/forced_align/cpu/compute.cpp create mode 100644 torchaudio/csrc/forced_align/gpu/compute.cu create mode 100644 torchaudio/csrc/iir_cuda.cu create mode 100644 torchaudio/csrc/iir_cuda.h create mode 100644 torchaudio/csrc/pybind/pybind.cpp create mode 100644 torchaudio/csrc/rir.cpp create mode 100644 torchaudio/csrc/sox/CMakeLists.txt create mode 100644 torchaudio/csrc/utils.h create mode 100644 torchaudio/datasets/librispeech_biasing.py create mode 100644 torchaudio/functional/_alignment.py create mode 100644 torchaudio/io/_effector.py create mode 100644 torchaudio/io/_playback.py create mode 100644 torchaudio/models/decoder/_cuda_ctc_decoder.py create mode 100644 torchaudio/models/squim/__init__.py create mode 100644 torchaudio/models/squim/objective.py create mode 100644 torchaudio/models/squim/subjective.py create mode 100644 torchaudio/models/wav2vec2/wavlm_attention.py create mode 100644 torchaudio/pipelines/_squim_pipeline.py create mode 100644 torchaudio/pipelines/_wav2vec2/aligner.py diff --git a/.circleci/build_docs/commit_docs.sh b/.circleci/build_docs/commit_docs.sh deleted file mode 100755 index 59374dce..00000000 --- a/.circleci/build_docs/commit_docs.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -set -ex - - -if [ "$2" == "" ]; then - echo call as "$0" "" "" - echo where src is the root of the built documentation git checkout and - echo branch should be "main" or "1.7" or so - exit 1 -fi - -src=$1 -target=$2 - -echo "committing docs from ${src} to 
${target}" - -pushd "${src}" -git checkout gh-pages -mkdir -p ./"${target}" -rm -rf ./"${target}"/* -cp -r "${src}/docs/build/html/"* ./"$target" -if [ "${target}" == "main" ]; then - mkdir -p ./_static - rm -rf ./_static/* - cp -r "${src}/docs/build/html/_static/"* ./_static - git add --all ./_static || true -fi -git add --all ./"${target}" || true -git config user.email "soumith+bot@pytorch.org" -git config user.name "pytorchbot" -# If there aren't changes, don't make a commit; push is no-op -git commit -m "auto-generating sphinx docs" || true -git remote add https https://github.com/pytorch/audio.git -git push -u https gh-pages diff --git a/.circleci/build_docs/install_wheels.sh b/.circleci/build_docs/install_wheels.sh deleted file mode 100755 index cc7683c6..00000000 --- a/.circleci/build_docs/install_wheels.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -if [[ -z "$PYTORCH_VERSION" ]]; then - # Nightly build - pip install --progress-bar off --pre torch -f "https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html" -else - # Release branch - pip install --progress-bar off "torch==${PYTORCH_VERSION}+cpu" \ - -f https://download.pytorch.org/whl/torch_stable.html \ - -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/torch_${UPLOAD_CHANNEL}.html" -fi -pip install --progress-bar off --no-deps ~/workspace/torchaudio* -pip install --progress-bar off -r docs/requirements.txt -r docs/requirements-tutorials.txt diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 837f805a..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,4096 +0,0 @@ -version: 2.1 - -# How to test the Linux jobs: -# - Install CircleCI local CLI: https://circleci.com/docs/2.0/local-cli/ -# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.8 -# - Replace binary_linux_wheel_py3.8 with the name of the job you want to test. -# Job names are 'name:' key. 
- -executors: - windows-cpu: - machine: - resource_class: windows.xlarge - image: windows-server-2019-vs2019:stable - shell: bash.exe - - windows-gpu: - machine: - resource_class: windows.gpu.nvidia.medium - image: windows-server-2019-nvidia:stable - shell: bash.exe - -commands: - generate_cache_key: - description: "Generates a cache key file that changes daily" - steps: - - run: - name: Generate cache key - command: echo "$(date +"%Y-%m-%d")" > .cachekey - designate_upload_channel: - description: "inserts the correct upload channel into ${BASH_ENV}" - steps: - - run: - name: adding UPLOAD_CHANNEL to BASH_ENV - command: | - # Hardcoded for release branch - echo "export UPLOAD_CHANNEL=test" >> ${BASH_ENV} - load_conda_channel_flags: - description: "Determines whether we need extra conda channels" - steps: - - run: - name: Adding CONDA_CHANNEL_FLAGS to BASH_ENV - command: | - CONDA_CHANNEL_FLAGS="" - # formerly used to add conda-forge flags for Python 3.9, reserving the mechanism for future python upgrades - windows_install_cuda: - description: "Install desired CUDA version on Windows runners" - steps: - - run: - name: Install CUDA - command: | - packaging/windows/internal/cuda_install.bat - -binary_common: &binary_common - parameters: - # Edit these defaults to do a release - build_version: - description: "version number of release binary; by default, build a nightly" - type: string - default: "0.13.1" - pytorch_version: - description: "PyTorch version to build against; by default, use a nightly" - type: string - default: "1.13.1" - # Don't edit these - python_version: - description: "Python version to build against (e.g., 3.8)" - type: string - cuda_version: - description: "CUDA version to build against (e.g., cpu, cu101)" - type: string - default: "cpu" - wheel_docker_image: - description: "Wheel only: what docker image to use" - type: string - default: "pytorch/manylinux-cuda116" - conda_docker_image: - description: "Conda only: what docker image to use" - type: 
string - default: "pytorch/conda-builder:cuda116" - environment: &environment - PYTHON_VERSION: << parameters.python_version >> - BUILD_VERSION: << parameters.build_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - CU_VERSION: << parameters.cuda_version >> - MACOSX_DEPLOYMENT_TARGET: 10.9 - -smoke_test_common: &smoke_test_common - <<: *binary_common - docker: - - image: pytorch/torchaudio_unittest_base:smoke_test-20220425 - resource_class: large - -jobs: - circleci_consistency: - docker: - - image: cimg/python:3.8 - steps: - - checkout - - run: - command: | - pip install --user --progress-bar off jinja2 pyyaml - python .circleci/regenerate.py - git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1) - - lint_python_and_config: - docker: - - image: circleci/python:3.7 - steps: - - checkout - - run: - name: Install pre-commit - command: pip install --user --progress-bar off pre-commit - - run: - name: Install pre-commit hooks - command: pre-commit install-hooks - - run: - name: Lint Python code and config files - command: pre-commit run --all-files - - run: - name: Required lint modifications - when: always - command: git --no-pager diff --color=always - - download_third_parties: - docker: - - image: "pytorch/torchaudio_unittest_base:manylinux" - resource_class: small - steps: - - checkout - - generate_cache_key - - restore_cache: - - keys: - - tp-nix-v2-{{ checksum ".cachekey" }} - - - run: - command: | - mkdir -p third_party/archives/ - wget --no-clobber --directory-prefix=third_party/archives/ $(awk '/URL /{print $2}' third_party/*/CMakeLists.txt) - - save_cache: - - key: tp-nix-v2-{{ checksum ".cachekey" }} - - paths: - - third_party/archives - - persist_to_workspace: - root: third_party - paths: - - archives - - build_ffmpeg_linux: - <<: *binary_common - docker: - - image: << parameters.wheel_docker_image >> - resource_class: 2xlarge+ - steps: - - checkout - - 
generate_cache_key - - restore_cache: - - keys: - - ffmpeg-linux-v0-{{ checksum ".cachekey" }} - - - run: - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - if [[ ! -d ${FFMPEG_ROOT} ]]; then - packaging/ffmpeg/build.sh - fi - - save_cache: - - key: ffmpeg-linux-v0-{{ checksum ".cachekey" }} - - paths: - - third_party/ffmpeg - - persist_to_workspace: - root: third_party - paths: - - ffmpeg - - build_ffmpeg_macos: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout - - generate_cache_key - - restore_cache: - - keys: - - ffmpeg-macos-v0-{{ checksum ".cachekey" }} - - - run: - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - if [[ ! -d ${FFMPEG_ROOT} ]]; then - packaging/ffmpeg/build.sh - fi - - save_cache: - - key: ffmpeg-macos-v0-{{ checksum ".cachekey" }} - - paths: - - third_party/ffmpeg - - persist_to_workspace: - root: third_party - paths: - - ffmpeg - - build_ffmpeg_windows: - <<: *binary_common - machine: - resource_class: windows.xlarge - image: windows-server-2019-vs2019:stable - # Note: - # Unlike other Windows job, this job uses cmd.exe as shell because - # we need to invoke bash.exe from msys2 in ffmpeg build process, and doing so - # from different installation of bash.exe (the one from the VM) cause issue - shell: cmd.exe - steps: - - checkout - - run: date /t > .cachekey - - restore_cache: - - keys: - - ffmpeg-windows-{{ checksum ".cachekey" }} - - - run: packaging\ffmpeg\build.bat - - save_cache: - - key: ffmpeg-windows-{{ checksum ".cachekey" }} - - paths: - - third_party/ffmpeg - - persist_to_workspace: - root: third_party - paths: - - ffmpeg - - binary_linux_wheel: - <<: *binary_common - docker: - - image: << parameters.wheel_docker_image >> - resource_class: 2xlarge+ - steps: - - checkout - - designate_upload_channel - - attach_workspace: - at: third_party - - run: - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - packaging/build_wheel.sh - environment: - USE_FFMPEG: true - - store_artifacts: - 
path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_linux_conda: - <<: *binary_common - docker: - - image: "<< parameters.conda_docker_image >>" - resource_class: 2xlarge+ - steps: - - checkout - - load_conda_channel_flags - - attach_workspace: - at: third_party - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - packaging/build_conda.sh - environment: - USE_FFMPEG: true - - store_artifacts: - path: /opt/conda/conda-bld/linux-64 - - persist_to_workspace: - root: /opt/conda - paths: - - "conda-bld/*" - - binary_macos_wheel: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - attach_workspace: - at: third_party - - run: - # Cannot easily deduplicate this as source'ing activate - # will set environment variables which we need to propagate - # to build_wheel.sh - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - packaging/build_wheel.sh - environment: - USE_FFMPEG: true - USE_OPENMP: false - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_macos_conda: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout - - load_conda_channel_flags - - attach_workspace: - at: third_party - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - packaging/build_conda.sh - environment: - USE_FFMPEG: true - USE_OPENMP: false - - store_artifacts: - path: /Users/distiller/miniconda3/conda-bld/osx-64 - - persist_to_workspace: - root: /Users/distiller/miniconda3 - paths: - - "conda-bld/*" - - 
binary_windows_wheel: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - windows_install_cuda - - attach_workspace: - at: third_party - - run: - name: Build wheel packages - no_output_timeout: 30m - command: | - set -ex - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate base - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - bash packaging/build_wheel.sh - environment: - USE_FFMPEG: true - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_windows_conda: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - load_conda_channel_flags - - windows_install_cuda - - attach_workspace: - at: third_party - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate base - conda install -yq conda-build "conda-package-handling!=1.5.0" - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - bash packaging/build_conda.sh - environment: - USE_FFMPEG: true - - store_artifacts: - path: C:/tools/miniconda3/conda-bld/win-64 - - persist_to_workspace: - root: C:/tools/miniconda3 - paths: - - "conda-bld/*" - - # Requires org-member context - binary_conda_upload: - docker: - - image: continuumio/miniconda - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - command: | - # Prevent credential from leaking - conda install -yq anaconda-client - set -x - anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/conda-bld/*/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force - - # Requires org-member context - binary_wheel_upload: - parameters: - subfolder: - description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" - type: string - docker: - - image: cimg/python:3.8 - steps: - - 
attach_workspace: - at: ~/workspace - - checkout - - designate_upload_channel - - run: - command: | - pip install --user awscli - export PATH="$HOME/.local/bin:$PATH" - # Prevent credential from leaking - set +x - export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" - export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" - set -x - for pkg in ~/workspace/*.whl; do - aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read - done - - smoke_test_linux_conda: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cpuonly - conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio - - checkout - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - smoke_test_linux_conda_gpu: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} -c nvidia pytorch pytorch-cuda=${CU_VERSION:2:2}.${CU_VERSION:4} - else - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} - fi - conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio - - checkout - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate 
python${PYTHON_VERSION} - conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - smoke_test_linux_pip: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${CU_VERSION}/torch_${UPLOAD_CHANNEL}.html" - - checkout - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${HOME}/workspace/ffmpeg/lib" - ./test/smoke_test/run_smoke_test.sh - - smoke_test_windows_conda: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda update -y conda - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cpuonly - conda install -v -y $(ls ~/workspace/conda-bld/win-64/torchaudio*.tar.bz2) - - checkout - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - smoke_test_windows_conda_gpu: - <<: *binary_common - executor: - name: windows-gpu - steps: - - checkout - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - windows_install_cuda - - run: - name: Update CUDA driver - command: 
packaging/windows/internal/driver_update.bat - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda update -y conda - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - # Include numpy and cudatoolkit in the install conda-forge chanell is used for cudatoolkit - - if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} -c nvidia pytorch numpy ffmpeg pytorch-cuda=${CU_VERSION:2:2}.${CU_VERSION:4} - else - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch numpy ffmpeg cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} - fi - # Install from torchaudio file - conda install -v -y $(ls ~/workspace/conda-bld/win-64/torchaudio*.tar.bz2) - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - # Install sound backend - pip install PySoundFile - # conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - smoke_test_windows_pip: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda update -y conda - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${CU_VERSION}/torch_${UPLOAD_CHANNEL}.html" - - checkout - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda 
activate python${PYTHON_VERSION} - # Hack to load FFmpeg libraries - # Note: Depending on Python version, they search different paths. - # For 3.7 and 3.9, copying them in CWD works. - cp ~/workspace/ffmpeg/bin/* test/smoke_test/ - # For 3.8 and 3.10, they must be in the same directory as the entrypoint lib - cp ~/workspace/ffmpeg/bin/* /C/tools/miniconda3/envs/python${PYTHON_VERSION}/lib/site-packages/torchaudio/lib/ - ./test/smoke_test/run_smoke_test.sh - - unittest_linux_cpu: - <<: *binary_common - docker: - - image: pytorch/torchaudio_unittest_base:manylinux-20210121 - resource_class: 2xlarge+ - steps: - - checkout - - attach_workspace: - at: third_party - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Install torchaudio - command: .circleci/unittest/linux/scripts/install.sh - environment: - USE_FFMPEG: true - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CUDA: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - unittest_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.medium - environment: - <<: *environment - CUDA_VERSION: 11.6 - image_name: pytorch/torchaudio_unittest_base:manylinux-cuda10.2-cudnn8-20210623 - steps: - - checkout - - attach_workspace: - at: third_party - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Pull Docker image - command: docker pull --quiet "${image_name}" - - run: - name: Setup - command: docker run -t --gpus all -e PYTHON_VERSION -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Install torchaudio - command: docker run -t --gpus all -e UPLOAD_CHANNEL -e CONDA_CHANNEL_FLAGS -e CUDA_VERSION -e USE_FFMPEG=1 -v 
$PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - command: | - docker run -t --gpus all -v $PWD:$PWD -w $PWD -e "CI=${CI}" -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310 "${image_name}" .circleci/unittest/linux/scripts/run_test.sh - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - unittest_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - run: - name: Install torchaudio - command: .circleci/unittest/windows/scripts/install.sh - environment: - USE_FFMPEG: true - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CUDA: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_KALDI: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - unittest_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - environment: - <<: *environment - CUDA_VERSION: "11.6" - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - run: - name: Install CUDA - command: 
packaging/windows/internal/cuda_install.bat - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - name: Install torchaudio - command: .circleci/unittest/windows/scripts/install.sh - environment: - USE_FFMPEG: true - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_KALDI: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - unittest_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" - resource_class: large - steps: - - checkout - - load_conda_channel_flags - - attach_workspace: - at: third_party - - designate_upload_channel - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Install torchaudio - command: .circleci/unittest/linux/scripts/install.sh - environment: - USE_FFMPEG: true - USE_OPENMP: false - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CUDA: true - 
TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_QUANTIZATION: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - stylecheck: - <<: *binary_common - docker: - - image: "pytorch/torchaudio_unittest_base:manylinux" - resource_class: medium - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Run style check - command: .circleci/unittest/linux/scripts/run_style_checks.sh - - build_docs: - <<: *smoke_test_common - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Install packages - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} -c nvidia pytorch pytorch-cuda=${CU_VERSION:2:2}.${CU_VERSION:4} - else - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} - fi - conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio - # gxx_linux-64 is for installing pesq library that depends on cython - conda install -y pandoc 'ffmpeg<5' gxx_linux-64 - apt update -qq && apt-get -qq install -y git make - pip install --progress-bar off -r docs/requirements.txt -r docs/requirements-tutorials.txt - - run: - name: Build docs - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - cd docs - make 'SPHINXOPTS=-W' html - cd build - tar -czf artifact.tar.gz html - mv artifact.tar.gz html - environment: - BUILD_GALLERY: 1 - TORCH_SHOW_CPP_STACKTRACES: 1 - no_output_timeout: 30m - - persist_to_workspace: - root: ./ - paths: - - "*" - - store_artifacts: - 
path: ./docs/build/html - destination: docs - - upload_docs: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cuda100" - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - run: - name: Generate netrc - command: | - # set credentials for https pushing - # requires the org-member context - cat > ~/.netrc \< gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.8 -# - Replace binary_linux_wheel_py3.8 with the name of the job you want to test. -# Job names are 'name:' key. - -executors: - windows-cpu: - machine: - resource_class: windows.xlarge - image: windows-server-2019-vs2019:stable - shell: bash.exe - - windows-gpu: - machine: - resource_class: windows.gpu.nvidia.medium - image: windows-server-2019-nvidia:stable - shell: bash.exe - -commands: - generate_cache_key: - description: "Generates a cache key file that changes daily" - steps: - - run: - name: Generate cache key - command: echo "$(date +"%Y-%m-%d")" > .cachekey - designate_upload_channel: - description: "inserts the correct upload channel into ${BASH_ENV}" - steps: - - run: - name: adding UPLOAD_CHANNEL to BASH_ENV - command: | - # Hardcoded for release branch - echo "export UPLOAD_CHANNEL=test" >> ${BASH_ENV} - load_conda_channel_flags: - description: "Determines whether we need extra conda channels" - steps: - - run: - name: Adding CONDA_CHANNEL_FLAGS to BASH_ENV - command: | - CONDA_CHANNEL_FLAGS="" - # formerly used to add conda-forge flags for Python 3.9, reserving the mechanism for future python upgrades - windows_install_cuda: - description: "Install desired CUDA version on Windows runners" - steps: - - run: - name: Install CUDA - command: | - packaging/windows/internal/cuda_install.bat - -binary_common: &binary_common - parameters: - # Edit these defaults to do a release - build_version: - description: "version number of release binary; by default, build a nightly" - type: string - default: "0.13.1" - pytorch_version: - description: 
"PyTorch version to build against; by default, use a nightly" - type: string - default: "1.13.1" - # Don't edit these - python_version: - description: "Python version to build against (e.g., 3.8)" - type: string - cuda_version: - description: "CUDA version to build against (e.g., cpu, cu101)" - type: string - default: "cpu" - wheel_docker_image: - description: "Wheel only: what docker image to use" - type: string - default: "pytorch/manylinux-cuda116" - conda_docker_image: - description: "Conda only: what docker image to use" - type: string - default: "pytorch/conda-builder:cuda116" - environment: &environment - PYTHON_VERSION: << parameters.python_version >> - BUILD_VERSION: << parameters.build_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - CU_VERSION: << parameters.cuda_version >> - MACOSX_DEPLOYMENT_TARGET: 10.9 - -smoke_test_common: &smoke_test_common - <<: *binary_common - docker: - - image: pytorch/torchaudio_unittest_base:smoke_test-20220425 - resource_class: large - -jobs: - circleci_consistency: - docker: - - image: cimg/python:3.8 - steps: - - checkout - - run: - command: | - pip install --user --progress-bar off jinja2 pyyaml - python .circleci/regenerate.py - git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! 
Run .circleci/regenerate.py to update config"; exit 1) - - lint_python_and_config: - docker: - - image: circleci/python:3.7 - steps: - - checkout - - run: - name: Install pre-commit - command: pip install --user --progress-bar off pre-commit - - run: - name: Install pre-commit hooks - command: pre-commit install-hooks - - run: - name: Lint Python code and config files - command: pre-commit run --all-files - - run: - name: Required lint modifications - when: always - command: git --no-pager diff --color=always - - download_third_parties: - docker: - - image: "pytorch/torchaudio_unittest_base:manylinux" - resource_class: small - steps: - - checkout - - generate_cache_key - - restore_cache: - {% raw %} - keys: - - tp-nix-v2-{{ checksum ".cachekey" }} - {% endraw %} - - run: - command: | - mkdir -p third_party/archives/ - wget --no-clobber --directory-prefix=third_party/archives/ $(awk '/URL /{print $2}' third_party/*/CMakeLists.txt) - - save_cache: - {% raw %} - key: tp-nix-v2-{{ checksum ".cachekey" }} - {% endraw %} - paths: - - third_party/archives - - persist_to_workspace: - root: third_party - paths: - - archives - - build_ffmpeg_linux: - <<: *binary_common - docker: - - image: << parameters.wheel_docker_image >> - resource_class: 2xlarge+ - steps: - - checkout - - generate_cache_key - - restore_cache: - {% raw %} - keys: - - ffmpeg-linux-v0-{{ checksum ".cachekey" }} - {% endraw %} - - run: - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - if [[ ! 
-d ${FFMPEG_ROOT} ]]; then - packaging/ffmpeg/build.sh - fi - - save_cache: - {% raw %} - key: ffmpeg-linux-v0-{{ checksum ".cachekey" }} - {% endraw %} - paths: - - third_party/ffmpeg - - persist_to_workspace: - root: third_party - paths: - - ffmpeg - - build_ffmpeg_macos: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout - - generate_cache_key - - restore_cache: - {% raw %} - keys: - - ffmpeg-macos-v0-{{ checksum ".cachekey" }} - {% endraw %} - - run: - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - if [[ ! -d ${FFMPEG_ROOT} ]]; then - packaging/ffmpeg/build.sh - fi - - save_cache: - {% raw %} - key: ffmpeg-macos-v0-{{ checksum ".cachekey" }} - {% endraw %} - paths: - - third_party/ffmpeg - - persist_to_workspace: - root: third_party - paths: - - ffmpeg - - build_ffmpeg_windows: - <<: *binary_common - machine: - resource_class: windows.xlarge - image: windows-server-2019-vs2019:stable - # Note: - # Unlike other Windows job, this job uses cmd.exe as shell because - # we need to invoke bash.exe from msys2 in ffmpeg build process, and doing so - # from different installation of bash.exe (the one from the VM) cause issue - shell: cmd.exe - steps: - - checkout - - run: date /t > .cachekey - - restore_cache: - {% raw %} - keys: - - ffmpeg-windows-{{ checksum ".cachekey" }} - {% endraw %} - - run: packaging\ffmpeg\build.bat - - save_cache: - {% raw %} - key: ffmpeg-windows-{{ checksum ".cachekey" }} - {% endraw %} - paths: - - third_party/ffmpeg - - persist_to_workspace: - root: third_party - paths: - - ffmpeg - - binary_linux_wheel: - <<: *binary_common - docker: - - image: << parameters.wheel_docker_image >> - resource_class: 2xlarge+ - steps: - - checkout - - designate_upload_channel - - attach_workspace: - at: third_party - - run: - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - packaging/build_wheel.sh - environment: - USE_FFMPEG: true - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - 
"*" - - binary_linux_conda: - <<: *binary_common - docker: - - image: "<< parameters.conda_docker_image >>" - resource_class: 2xlarge+ - steps: - - checkout - - load_conda_channel_flags - - attach_workspace: - at: third_party - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - export FFMPEG_ROOT=${PWD}/third_party/ffmpeg - packaging/build_conda.sh - environment: - USE_FFMPEG: true - - store_artifacts: - path: /opt/conda/conda-bld/linux-64 - - persist_to_workspace: - root: /opt/conda - paths: - - "conda-bld/*" - - binary_macos_wheel: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - attach_workspace: - at: third_party - - run: - # Cannot easily deduplicate this as source'ing activate - # will set environment variables which we need to propagate - # to build_wheel.sh - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - packaging/build_wheel.sh - environment: - USE_FFMPEG: true - USE_OPENMP: false - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_macos_conda: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout - - load_conda_channel_flags - - attach_workspace: - at: third_party - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - packaging/build_conda.sh - environment: - USE_FFMPEG: true - USE_OPENMP: false - - store_artifacts: - path: /Users/distiller/miniconda3/conda-bld/osx-64 - - persist_to_workspace: - root: /Users/distiller/miniconda3 - paths: - - "conda-bld/*" - - binary_windows_wheel: - <<: *binary_common - executor: - name: windows-cpu - 
steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - windows_install_cuda - - attach_workspace: - at: third_party - - run: - name: Build wheel packages - no_output_timeout: 30m - command: | - set -ex - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate base - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - bash packaging/build_wheel.sh - environment: - USE_FFMPEG: true - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_windows_conda: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - load_conda_channel_flags - - windows_install_cuda - - attach_workspace: - at: third_party - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate base - conda install -yq conda-build "conda-package-handling!=1.5.0" - export FFMPEG_ROOT="${PWD}/third_party/ffmpeg" - bash packaging/build_conda.sh - environment: - USE_FFMPEG: true - - store_artifacts: - path: C:/tools/miniconda3/conda-bld/win-64 - - persist_to_workspace: - root: C:/tools/miniconda3 - paths: - - "conda-bld/*" - - # Requires org-member context - binary_conda_upload: - docker: - - image: continuumio/miniconda - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - command: | - # Prevent credential from leaking - conda install -yq anaconda-client - set -x - anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/conda-bld/*/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force - - # Requires org-member context - binary_wheel_upload: - parameters: - subfolder: - description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" - type: string - docker: - - image: cimg/python:3.8 - steps: - - attach_workspace: - at: ~/workspace - - checkout - - designate_upload_channel - - run: - 
command: | - pip install --user awscli - export PATH="$HOME/.local/bin:$PATH" - # Prevent credential from leaking - set +x - export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" - export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" - set -x - for pkg in ~/workspace/*.whl; do - aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read - done - - smoke_test_linux_conda: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cpuonly - conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio - - checkout - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - smoke_test_linux_conda_gpu: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} -c nvidia pytorch pytorch-cuda=${CU_VERSION:2:2}.${CU_VERSION:4} - else - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} - fi - conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio - - checkout - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - 
smoke_test_linux_pip: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${CU_VERSION}/torch_${UPLOAD_CHANNEL}.html" - - checkout - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${HOME}/workspace/ffmpeg/lib" - ./test/smoke_test/run_smoke_test.sh - - smoke_test_windows_conda: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda update -y conda - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cpuonly - conda install -v -y $(ls ~/workspace/conda-bld/win-64/torchaudio*.tar.bz2) - - checkout - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - smoke_test_windows_conda_gpu: - <<: *binary_common - executor: - name: windows-gpu - steps: - - checkout - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - windows_install_cuda - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - name: install binaries - 
no_output_timeout: 30m - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda update -y conda - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - # Include numpy and cudatoolkit in the install conda-forge chanell is used for cudatoolkit - - if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} -c nvidia pytorch numpy ffmpeg pytorch-cuda=${CU_VERSION:2:2}.${CU_VERSION:4} - else - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch numpy ffmpeg cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} - fi - # Install from torchaudio file - conda install -v -y $(ls ~/workspace/conda-bld/win-64/torchaudio*.tar.bz2) - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - # Install sound backend - pip install PySoundFile - # conda install 'ffmpeg<5' - ./test/smoke_test/run_smoke_test.sh - - smoke_test_windows_pip: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - load_conda_channel_flags - - run: - name: install binaries - no_output_timeout: 30m - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda update -y conda - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - pip install $(ls ~/workspace/torchaudio*.whl) -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${CU_VERSION}/torch_${UPLOAD_CHANNEL}.html" - - checkout - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - # Hack to load FFmpeg libraries - # Note: 
Depending on Python version, they search different paths. - # For 3.7 and 3.9, copying them in CWD works. - cp ~/workspace/ffmpeg/bin/* test/smoke_test/ - # For 3.8 and 3.10, they must be in the same directory as the entrypoint lib - cp ~/workspace/ffmpeg/bin/* /C/tools/miniconda3/envs/python${PYTHON_VERSION}/lib/site-packages/torchaudio/lib/ - ./test/smoke_test/run_smoke_test.sh - - unittest_linux_cpu: - <<: *binary_common - docker: - - image: pytorch/torchaudio_unittest_base:manylinux-20210121 - resource_class: 2xlarge+ - steps: - - checkout - - attach_workspace: - at: third_party - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Install torchaudio - command: .circleci/unittest/linux/scripts/install.sh - environment: - USE_FFMPEG: true - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CUDA: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - unittest_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.medium - environment: - <<: *environment - CUDA_VERSION: 11.6 - image_name: pytorch/torchaudio_unittest_base:manylinux-cuda10.2-cudnn8-20210623 - steps: - - checkout - - attach_workspace: - at: third_party - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Pull Docker image - command: docker pull --quiet "${image_name}" - - run: - name: Setup - command: docker run -t --gpus all -e PYTHON_VERSION -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Install torchaudio - command: docker run -t --gpus all -e UPLOAD_CHANNEL -e CONDA_CHANNEL_FLAGS -e CUDA_VERSION -e USE_FFMPEG=1 -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/install.sh - - 
run: - name: Run tests - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - command: | - docker run -t --gpus all -v $PWD:$PWD -w $PWD -e "CI=${CI}" -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310 "${image_name}" .circleci/unittest/linux/scripts/run_test.sh - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - unittest_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - run: - name: Install torchaudio - command: .circleci/unittest/windows/scripts/install.sh - environment: - USE_FFMPEG: true - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CUDA: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_KALDI: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - unittest_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - environment: - <<: *environment - CUDA_VERSION: "11.6" - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - run: - name: Install CUDA - command: packaging/windows/internal/cuda_install.bat - - run: - name: Update CUDA driver - command: 
packaging/windows/internal/driver_update.bat - - run: - name: Install torchaudio - command: .circleci/unittest/windows/scripts/install.sh - environment: - USE_FFMPEG: true - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_KALDI: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - unittest_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" - resource_class: large - steps: - - checkout - - load_conda_channel_flags - - attach_workspace: - at: third_party - - designate_upload_channel - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Install torchaudio - command: .circleci/unittest/linux/scripts/install.sh - environment: - USE_FFMPEG: true - USE_OPENMP: false - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - environment: - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CUDA: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_QUANTIZATION: true - TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true - 
TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true - - store_test_results: - path: test-results - - store_artifacts: - path: test/htmlcov - - stylecheck: - <<: *binary_common - docker: - - image: "pytorch/torchaudio_unittest_base:manylinux" - resource_class: medium - steps: - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Run style check - command: .circleci/unittest/linux/scripts/run_style_checks.sh - - build_docs: - <<: *smoke_test_common - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - checkout - - designate_upload_channel - - load_conda_channel_flags - - run: - name: Install packages - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} -c nvidia pytorch pytorch-cuda=${CU_VERSION:2:2}.${CU_VERSION:4} - else - conda install -v -y -c pytorch-${UPLOAD_CHANNEL} pytorch cudatoolkit=${CU_VERSION:2:2}.${CU_VERSION:4} - fi - conda install -v -y -c file://$HOME/workspace/conda-bld torchaudio - # gxx_linux-64 is for installing pesq library that depends on cython - conda install -y pandoc 'ffmpeg<5' gxx_linux-64 - apt update -qq && apt-get -qq install -y git make - pip install --progress-bar off -r docs/requirements.txt -r docs/requirements-tutorials.txt - - run: - name: Build docs - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - cd docs - make 'SPHINXOPTS=-W' html - cd build - tar -czf artifact.tar.gz html - mv artifact.tar.gz html - environment: - BUILD_GALLERY: 1 - TORCH_SHOW_CPP_STACKTRACES: 1 - no_output_timeout: 30m - - persist_to_workspace: - root: ./ - paths: - - "*" - - store_artifacts: - path: ./docs/build/html - destination: docs - - upload_docs: - <<: *binary_common - docker: - - image: 
"pytorch/manylinux-cuda100" - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - run: - name: Generate netrc - command: | - # set credentials for https pushing - # requires the org-member context - cat > ~/.netrc \<> ~/.bashrc -RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.7 && conda install -y -c conda-forge sox && conda install -y numpy -RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.8 && conda install -y -c conda-forge sox && conda install -y numpy -RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.9 && conda install -y -c conda-forge sox && conda install -y numpy -RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.10 && conda install -y -c conda-forge sox && conda install -y numpy -CMD [ "/bin/bash"] diff --git a/.circleci/smoke_test/docker/build_and_push.sh b/.circleci/smoke_test/docker/build_and_push.sh deleted file mode 100755 index 092d21de..00000000 --- a/.circleci/smoke_test/docker/build_and_push.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -datestr="$(date "+%Y%m%d")" -image="pytorch/torchaudio_unittest_base:smoke_test-${datestr}" -docker build -t "${image}" . -docker push "${image}" diff --git a/.circleci/unittest/linux/README.md b/.circleci/unittest/linux/README.md deleted file mode 100644 index 0a4b0e0e..00000000 --- a/.circleci/unittest/linux/README.md +++ /dev/null @@ -1,6 +0,0 @@ -This directory contains; - - - docker - Docker image definition and scripts to build and update Docker image for unittest. - - scripts - Scripts used by CircleCI to run unit tests. 
diff --git a/.circleci/unittest/linux/docker/.dockerignore b/.circleci/unittest/linux/docker/.dockerignore deleted file mode 100644 index 1398d409..00000000 --- a/.circleci/unittest/linux/docker/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!scripts diff --git a/.circleci/unittest/linux/docker/.gitignore b/.circleci/unittest/linux/docker/.gitignore deleted file mode 100644 index 7e977058..00000000 --- a/.circleci/unittest/linux/docker/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -scripts/build_third_parties.sh -Dockerfile.tmp diff --git a/.circleci/unittest/linux/docker/Dockerfile b/.circleci/unittest/linux/docker/Dockerfile deleted file mode 100644 index c47a8963..00000000 --- a/.circleci/unittest/linux/docker/Dockerfile +++ /dev/null @@ -1,56 +0,0 @@ -FROM ubuntu:18.04 as builder - -RUN apt update -q - -################################################################################ -# Build Kaldi -################################################################################ -RUN apt install -q -y \ - autoconf \ - automake \ - bzip2 \ - g++ \ - gfortran \ - git \ - libatlas-base-dev \ - libtool \ - make \ - python2.7 \ - python3 \ - sox \ - subversion \ - unzip \ - wget \ - zlib1g-dev - -# KALDI uses MKL as a default math library, but we are going to copy featbin binaries and dependent -# shared libraries to the final image, so we use ATLAS, which is easy to reinstall in the final image. 
-RUN git clone --depth 1 https://github.com/kaldi-asr/kaldi.git /opt/kaldi && \ - cd /opt/kaldi/tools && \ - make -j $(nproc) && \ - cd /opt/kaldi/src && \ - ./configure --shared --mathlib=ATLAS --use-cuda=no && \ - make featbin -j $(nproc) - -# Copy featbins and dependent libraries -ADD ./scripts /scripts -RUN bash /scripts/copy_kaldi_executables.sh /opt/kaldi /kaldi - -################################################################################ -# Build the final image -################################################################################ -FROM BASE_IMAGE -RUN apt update && apt install -y \ - g++ \ - gfortran \ - git \ - libatlas3-base \ - libsndfile1 \ - wget \ - curl \ - make \ - file \ - pkg-config \ - && rm -rf /var/lib/apt/lists/* -COPY --from=builder /kaldi /kaldi -ENV PATH="${PATH}:/kaldi/bin" LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/kaldi/lib" diff --git a/.circleci/unittest/linux/docker/build_and_push.sh b/.circleci/unittest/linux/docker/build_and_push.sh deleted file mode 100755 index e7ced13a..00000000 --- a/.circleci/unittest/linux/docker/build_and_push.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -if [ $# -ne 1 ]; then - printf "Usage %s \n\n" "$0" - exit 1 -fi - -datestr="$(date "+%Y%m%d")" -if [ "$1" = "cpu" ]; then - base_image="ubuntu:18.04" - image="pytorch/torchaudio_unittest_base:manylinux-${datestr}" -else - base_image="nvidia/cuda:$1-devel-ubuntu18.04" - docker pull "${base_image}" - image="pytorch/torchaudio_unittest_base:manylinux-cuda$1-${datestr}" -fi - -cd "$( dirname "${BASH_SOURCE[0]}" )" - -# docker build also accepts reading from STDIN -# but in that case, no context (other files) can be passed, so we write out Dockerfile -sed "s|BASE_IMAGE|${base_image}|g" Dockerfile > Dockerfile.tmp -docker build -t "${image}" -f Dockerfile.tmp . 
-docker push "${image}" diff --git a/.circleci/unittest/linux/docker/scripts/copy_kaldi_executables.sh b/.circleci/unittest/linux/docker/scripts/copy_kaldi_executables.sh deleted file mode 100755 index b0cf2071..00000000 --- a/.circleci/unittest/linux/docker/scripts/copy_kaldi_executables.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash - -list_executables() { - # List up executables in the given directory - find "$1" -type f -executable -} - -list_kaldi_libraries() { - # List up shared libraries used by executables found in the given directory ($1) - # that reside in Kaldi directory ($2) - while read file; do - ldd "${file}" | grep -o "${2}.* "; - done < <(list_executables "$1") | sort -u -} - -set -euo pipefail - -kaldi_root="$(realpath "$1")" -target_dir="$(realpath "$2")" - -bin_dir="${target_dir}/bin" -lib_dir="${target_dir}/lib" - -mkdir -p "${bin_dir}" "${lib_dir}" - -# 1. Copy featbins -printf "Copying executables to %s\n" "${bin_dir}" -while read file; do - printf " %s\n" "${file}" - cp "${file}" "${bin_dir}" -done < <(list_executables "${kaldi_root}/src/featbin") - -# 2. Copy dependent libraries from Kaldi -printf "Copying libraries to %s\n" "${lib_dir}" -while read file; do - printf " %s\n" "$file" - # If it is not symlink, just copy to the target directory - if [ ! -L "${file}" ]; then - cp "${file}" "${lib_dir}" - continue - fi - - # If it is symlink, - # 1. Copy the actual library to the target directory. - library="$(realpath "${file}")" - cp "${library}" "${lib_dir}" - # 2. then if the name of the symlink is different from the actual library name, - # create the symlink in the target directory. 
- lib_name="$(basename "${library}")" - link_name="$(basename "${file}")" - if [ "${lib_name}" != "${link_name}" ]; then - printf " Linking %s -> %s\n" "${lib_name}" "${link_name}" - ( - cd "${lib_dir}" - ln -sf "${lib_name}" "${link_name}" - ) - fi -done < <(list_kaldi_libraries "${bin_dir}" "${kaldi_root}") diff --git a/.circleci/unittest/linux/scripts/install.sh b/.circleci/unittest/linux/scripts/install.sh deleted file mode 100755 index 2a9c41f0..00000000 --- a/.circleci/unittest/linux/scripts/install.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash - -unset PYTORCH_VERSION -# For unittest, nightly PyTorch is used as the following section, -# so no need to set PYTORCH_VERSION. -# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. - -set -e - -root_dir="$(git rev-parse --show-toplevel)" -conda_dir="${root_dir}/conda" -env_dir="${root_dir}/env" - -cd "${root_dir}" - -case "$(uname -s)" in - Darwin*) os=MacOSX;; - *) os=Linux -esac - -# 0. Activate conda env -eval "$("${conda_dir}/bin/conda" shell.bash hook)" -conda activate "${env_dir}" - -# 1. 
Install PyTorch -if [ -z "${CUDA_VERSION:-}" ] ; then - if [ "${os}" == MacOSX ] ; then - cudatoolkit='' - else - cudatoolkit="cpuonly" - fi - version="cpu" -else - version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" - export CUDATOOLKIT_CHANNEL="nvidia" - cudatoolkit="pytorch-cuda=${version}" -fi - -printf "Installing PyTorch with %s\n" "${cudatoolkit}" -( - if [ "${os}" == MacOSX ] ; then - # TODO: this can be removed as soon as linking issue could be resolved - # see https://github.com/pytorch/pytorch/issues/62424 from details - MKL_CONSTRAINT='mkl==2021.2.0' - pytorch_build=pytorch - else - MKL_CONSTRAINT='' - pytorch_build="pytorch[build="*${version}*"]" - fi - set -x - - if [[ -z "$cudatoolkit" ]]; then - conda install ${CONDA_CHANNEL_FLAGS:-} -y -c "pytorch-${UPLOAD_CHANNEL}" $MKL_CONSTRAINT "pytorch-${UPLOAD_CHANNEL}::${pytorch_build}" - else - conda install pytorch ${cudatoolkit} ${CONDA_CHANNEL_FLAGS:-} -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia $MKL_CONSTRAINT - fi -) - -# 2. Install torchaudio -printf "* Installing torchaudio\n" -python setup.py install - -# 3. Install Test tools -printf "* Installing test tools\n" -NUMBA_DEV_CHANNEL="" -if [[ "$(python --version)" = *3.9* || "$(python --version)" = *3.10* ]]; then - # Numba isn't available for Python 3.9 and 3.10 except on the numba dev channel and building from source fails - # See https://github.com/librosa/librosa/issues/1270#issuecomment-759065048 - NUMBA_DEV_CHANNEL="-c numba/label/dev" -fi -# Note: installing librosa via pip fail because it will try to compile numba. 
-( - set -x - conda install -y -c conda-forge ${NUMBA_DEV_CHANNEL} 'librosa>=0.8.0' parameterized 'requests>=2.20' - pip install kaldi-io SoundFile coverage pytest pytest-cov 'scipy==1.7.3' transformers expecttest unidecode inflect Pillow sentencepiece pytorch-lightning 'protobuf<4.21.0' demucs tinytag -) -# Install fairseq -git clone https://github.com/pytorch/fairseq -cd fairseq -git checkout e47a4c8 -pip install . diff --git a/.circleci/unittest/linux/scripts/run_clang_format.py b/.circleci/unittest/linux/scripts/run_clang_format.py deleted file mode 100755 index 250cc6e3..00000000 --- a/.circleci/unittest/linux/scripts/run_clang_format.py +++ /dev/null @@ -1,310 +0,0 @@ -#!/usr/bin/env python -"""A wrapper script around clang-format, suitable for linting multiple files -and to use for continuous integration. - -This is an alternative API for the clang-format command line. -It runs over multiple files and directories in parallel. -A diff output is produced and a sensible exit code is returned. - -""" - -import argparse -import codecs -import difflib -import fnmatch -import io -import multiprocessing -import os -import signal -import subprocess -import sys -import traceback -from functools import partial - -try: - from subprocess import DEVNULL # py3k -except ImportError: - DEVNULL = open(os.devnull, "wb") - - -DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu" - - -class ExitStatus: - SUCCESS = 0 - DIFF = 1 - TROUBLE = 2 - - -def list_files(files, recursive=False, extensions=None, exclude=None): - if extensions is None: - extensions = [] - if exclude is None: - exclude = [] - - out = [] - for file in files: - if recursive and os.path.isdir(file): - for dirpath, dnames, fnames in os.walk(file): - fpaths = [os.path.join(dirpath, fname) for fname in fnames] - for pattern in exclude: - # os.walk() supports trimming down the dnames list - # by modifying it in-place, - # to avoid unnecessary directory listings. 
- dnames[:] = [x for x in dnames if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)] - fpaths = [x for x in fpaths if not fnmatch.fnmatch(x, pattern)] - for f in fpaths: - ext = os.path.splitext(f)[1][1:] - if ext in extensions: - out.append(f) - else: - out.append(file) - return out - - -def make_diff(file, original, reformatted): - return list( - difflib.unified_diff( - original, reformatted, fromfile="{}\t(original)".format(file), tofile="{}\t(reformatted)".format(file), n=3 - ) - ) - - -class DiffError(Exception): - def __init__(self, message, errs=None): - super(DiffError, self).__init__(message) - self.errs = errs or [] - - -class UnexpectedError(Exception): - def __init__(self, message, exc=None): - super(UnexpectedError, self).__init__(message) - self.formatted_traceback = traceback.format_exc() - self.exc = exc - - -def run_clang_format_diff_wrapper(args, file): - try: - ret = run_clang_format_diff(args, file) - return ret - except DiffError: - raise - except Exception as e: - raise UnexpectedError("{}: {}: {}".format(file, e.__class__.__name__, e), e) - - -def run_clang_format_diff(args, file): - try: - with io.open(file, "r", encoding="utf-8") as f: - original = f.readlines() - except IOError as exc: - raise DiffError(str(exc)) - invocation = [args.clang_format_executable, file] - - # Use of utf-8 to decode the process output. - # - # Hopefully, this is the correct thing to do. - # - # It's done due to the following assumptions (which may be incorrect): - # - clang-format will returns the bytes read from the files as-is, - # without conversion, and it is already assumed that the files use utf-8. - # - if the diagnostics were internationalized, they would use utf-8: - # > Adding Translations to Clang - # > - # > Not possible yet! - # > Diagnostic strings should be written in UTF-8, - # > the client can translate to the relevant code page if needed. - # > Each translation completely replaces the format string - # > for the diagnostic. 
- # > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation - - try: - proc = subprocess.Popen( - invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding="utf-8" - ) - except OSError as exc: - raise DiffError("Command '{}' failed to start: {}".format(subprocess.list2cmdline(invocation), exc)) - proc_stdout = proc.stdout - proc_stderr = proc.stderr - - # hopefully the stderr pipe won't get full and block the process - outs = list(proc_stdout.readlines()) - errs = list(proc_stderr.readlines()) - proc.wait() - if proc.returncode: - raise DiffError( - "Command '{}' returned non-zero exit status {}".format( - subprocess.list2cmdline(invocation), proc.returncode - ), - errs, - ) - return make_diff(file, original, outs), errs - - -def bold_red(s): - return "\x1b[1m\x1b[31m" + s + "\x1b[0m" - - -def colorize(diff_lines): - def bold(s): - return "\x1b[1m" + s + "\x1b[0m" - - def cyan(s): - return "\x1b[36m" + s + "\x1b[0m" - - def green(s): - return "\x1b[32m" + s + "\x1b[0m" - - def red(s): - return "\x1b[31m" + s + "\x1b[0m" - - for line in diff_lines: - if line[:4] in ["--- ", "+++ "]: - yield bold(line) - elif line.startswith("@@ "): - yield cyan(line) - elif line.startswith("+"): - yield green(line) - elif line.startswith("-"): - yield red(line) - else: - yield line - - -def print_diff(diff_lines, use_color): - if use_color: - diff_lines = colorize(diff_lines) - sys.stdout.writelines(diff_lines) - - -def print_trouble(prog, message, use_colors): - error_text = "error:" - if use_colors: - error_text = bold_red(error_text) - print("{}: {} {}".format(prog, error_text, message), file=sys.stderr) - - -def main(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - "--clang-format-executable", - metavar="EXECUTABLE", - help="path to the clang-format executable", - default="clang-format", - ) - parser.add_argument( - "--extensions", - help="comma separated list of file extensions 
(default: {})".format(DEFAULT_EXTENSIONS), - default=DEFAULT_EXTENSIONS, - ) - parser.add_argument("-r", "--recursive", action="store_true", help="run recursively over directories") - parser.add_argument("files", metavar="file", nargs="+") - parser.add_argument("-q", "--quiet", action="store_true") - parser.add_argument( - "-j", - metavar="N", - type=int, - default=0, - help="run N clang-format jobs in parallel" " (default number of cpus + 1)", - ) - parser.add_argument( - "--color", default="auto", choices=["auto", "always", "never"], help="show colored diff (default: auto)" - ) - parser.add_argument( - "-e", - "--exclude", - metavar="PATTERN", - action="append", - default=[], - help="exclude paths matching the given glob-like pattern(s)" " from recursive search", - ) - - args = parser.parse_args() - - # use default signal handling, like diff return SIGINT value on ^C - # https://bugs.python.org/issue14229#msg156446 - signal.signal(signal.SIGINT, signal.SIG_DFL) - try: - signal.SIGPIPE - except AttributeError: - # compatibility, SIGPIPE does not exist on Windows - pass - else: - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - colored_stdout = False - colored_stderr = False - if args.color == "always": - colored_stdout = True - colored_stderr = True - elif args.color == "auto": - colored_stdout = sys.stdout.isatty() - colored_stderr = sys.stderr.isatty() - - version_invocation = [args.clang_format_executable, str("--version")] - try: - subprocess.check_call(version_invocation, stdout=DEVNULL) - except subprocess.CalledProcessError as e: - print_trouble(parser.prog, str(e), use_colors=colored_stderr) - return ExitStatus.TROUBLE - except OSError as e: - print_trouble( - parser.prog, - "Command '{}' failed to start: {}".format(subprocess.list2cmdline(version_invocation), e), - use_colors=colored_stderr, - ) - return ExitStatus.TROUBLE - - retcode = ExitStatus.SUCCESS - files = list_files( - args.files, recursive=args.recursive, exclude=args.exclude, 
extensions=args.extensions.split(",") - ) - - if not files: - return - - njobs = args.j - if njobs == 0: - njobs = multiprocessing.cpu_count() + 1 - njobs = min(len(files), njobs) - - if njobs == 1: - # execute directly instead of in a pool, - # less overhead, simpler stacktraces - it = (run_clang_format_diff_wrapper(args, file) for file in files) - pool = None - else: - pool = multiprocessing.Pool(njobs) - it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files) - while True: - try: - outs, errs = next(it) - except StopIteration: - break - except DiffError as e: - print_trouble(parser.prog, str(e), use_colors=colored_stderr) - retcode = ExitStatus.TROUBLE - sys.stderr.writelines(e.errs) - except UnexpectedError as e: - print_trouble(parser.prog, str(e), use_colors=colored_stderr) - sys.stderr.write(e.formatted_traceback) - retcode = ExitStatus.TROUBLE - # stop at the first unexpected error, - # something could be very wrong, - # don't process all files unnecessarily - if pool: - pool.terminate() - break - else: - sys.stderr.writelines(errs) - if outs == []: - continue - if not args.quiet: - print_diff(outs, use_color=colored_stdout) - if retcode == ExitStatus.SUCCESS: - retcode = ExitStatus.DIFF - return retcode - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/.circleci/unittest/linux/scripts/run_style_checks.sh b/.circleci/unittest/linux/scripts/run_style_checks.sh deleted file mode 100755 index 0620f486..00000000 --- a/.circleci/unittest/linux/scripts/run_style_checks.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash - -set -eux - -root_dir="$(git rev-parse --show-toplevel)" -conda_dir="${root_dir}/conda" -env_dir="${root_dir}/env" -this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -eval "$("${conda_dir}/bin/conda" shell.bash hook)" -conda activate "${env_dir}" - -# 1. 
Install tools -conda install -y flake8==3.9.2 -printf "Installed flake8: " -flake8 --version - -clangformat_path="${root_dir}/clang-format" -curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o "${clangformat_path}" -chmod +x "${clangformat_path}" -printf "Installed clang-fortmat" -"${clangformat_path}" --version - -# 2. Run style checks -# We want to run all the style checks even if one of them fail. - -set +e - -exit_status=0 - -printf "\x1b[34mRunning flake8:\x1b[0m\n" -flake8 torchaudio test tools/setup_helpers docs/source/conf.py examples -status=$? -exit_status="$((exit_status+status))" -if [ "${status}" -ne 0 ]; then - printf "\x1b[31mflake8 failed. Check the format of Python files.\x1b[0m\n" -fi - -printf "\x1b[34mRunning clang-format:\x1b[0m\n" -"${this_dir}"/run_clang_format.py \ - -r torchaudio/csrc third_party/kaldi/src \ - --clang-format-executable "${clangformat_path}" \ - && git diff --exit-code -status=$? -exit_status="$((exit_status+status))" -if [ "${status}" -ne 0 ]; then - printf "\x1b[31mC++ files are not formatted. 
Please use clang-format to format CPP files.\x1b[0m\n" -fi -exit $exit_status diff --git a/.circleci/unittest/linux/scripts/run_test.sh b/.circleci/unittest/linux/scripts/run_test.sh deleted file mode 100755 index 83a9196d..00000000 --- a/.circleci/unittest/linux/scripts/run_test.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -set -e - -eval "$(./conda/bin/conda shell.bash hook)" -conda activate ./env - -python -m torch.utils.collect_env -env | grep TORCHAUDIO || true - -export PATH="${PWD}/third_party/install/bin/:${PATH}" - -declare -a args=( - '-v' - '--cov=torchaudio' - "--junitxml=${PWD}/test-results/junit.xml" - '--durations' '20' -) - -cd test -pytest "${args[@]}" torchaudio_unittest -coverage html diff --git a/.circleci/unittest/linux/scripts/setup_env.sh b/.circleci/unittest/linux/scripts/setup_env.sh deleted file mode 100755 index b2bf6ccb..00000000 --- a/.circleci/unittest/linux/scripts/setup_env.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -# This script is for setting up environment in which unit test is ran. -# To speed up the CI time, the resulting environment is cached. -# -# Do not install PyTorch and torchaudio here, otherwise they also get cached. - -set -ex - -root_dir="$(git rev-parse --show-toplevel)" -conda_dir="${root_dir}/conda" -env_dir="${root_dir}/env" - -cd "${root_dir}" - -case "$(uname -s)" in - Darwin*) os=MacOSX;; - *) os=Linux -esac - -# 1. Install conda at ./conda -if [ ! -d "${conda_dir}" ]; then - printf "* Installing conda\n" - curl --silent -L -o miniconda.sh "http://repo.continuum.io/miniconda/Miniconda3-latest-${os}-x86_64.sh" - bash ./miniconda.sh -b -f -p "${conda_dir}" -fi -eval "$("${conda_dir}/bin/conda" shell.bash hook)" - - -# 2. Create test environment at ./env -if [ ! -d "${env_dir}" ]; then - printf "* Creating a test environment with PYTHON_VERSION=%s\n" "${PYTHON_VERSION}\n" - conda create --prefix "${env_dir}" -y python="${PYTHON_VERSION}" -fi -conda activate "${env_dir}" - -# 3. 
Install minimal build tools -pip --quiet install cmake ninja -conda install --quiet -y 'ffmpeg>=4.1' pkg-config diff --git a/.circleci/unittest/windows/README.md b/.circleci/unittest/windows/README.md deleted file mode 100644 index 2c06af62..00000000 --- a/.circleci/unittest/windows/README.md +++ /dev/null @@ -1,4 +0,0 @@ -This directory contains; - - - scripts - Scripts used by CircleCI to run unit tests. diff --git a/.circleci/unittest/windows/scripts/environment.yml b/.circleci/unittest/windows/scripts/environment.yml deleted file mode 100644 index 16225c9e..00000000 --- a/.circleci/unittest/windows/scripts/environment.yml +++ /dev/null @@ -1,16 +0,0 @@ -channels: - - defaults -dependencies: - - flake8 - - pytest - - pytest-cov - - codecov - - scipy >= 1.4.1 - - pip - - pip: - - kaldi-io - - PySoundFile - - future - - parameterized - - dataclasses - - expecttest diff --git a/.circleci/unittest/windows/scripts/install.sh b/.circleci/unittest/windows/scripts/install.sh deleted file mode 100644 index c1a308ec..00000000 --- a/.circleci/unittest/windows/scripts/install.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env bash - -unset PYTORCH_VERSION -# For unittest, nightly PyTorch is used as the following section, -# so no need to set PYTORCH_VERSION. -# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. - -set -ex - -root_dir="$(git rev-parse --show-toplevel)" -conda_dir="${root_dir}/conda" -env_dir="${root_dir}/env" -this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -cd "${root_dir}" - -# 0. Activate conda env -eval "$("${conda_dir}/Scripts/conda.exe" 'shell.bash' 'hook')" -conda activate "${env_dir}" - -source "$this_dir/set_cuda_envs.sh" - -# 1. 
Install PyTorch -if [ -z "${CUDA_VERSION:-}" ] ; then - cudatoolkit="cpuonly" - version="cpu" -else - version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" - - cuda_toolkit_pckg="cudatoolkit" - if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then - cuda_toolkit_pckg="pytorch-cuda" - fi - - cudatoolkit="${cuda_toolkit_pckg}=${version}" -fi -printf "Installing PyTorch with %s\n" "${cudatoolkit}" -conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia pytorch "${cudatoolkit}" pytest -conda install -y -c conda-forge mkl=2020.4 - -torch_cuda=$(python -c "import torch; print(torch.cuda.is_available())") -echo torch.cuda.is_available is $torch_cuda - -if [ ! -z "${CUDA_VERSION:-}" ] ; then - if [ "$torch_cuda" == "False" ]; then - echo "torch with cuda installed but torch.cuda.is_available() is False" - exit 1 - fi -fi - -# 2. Install torchaudio -printf "* Installing torchaudio\n" -"$root_dir/packaging/vc_env_helper.bat" python setup.py install - -# 3. Install Test tools -printf "* Installing test tools\n" -NUMBA_DEV_CHANNEL="" -SENTENCEPIECE_DEPENDENCY="sentencepiece" -case "$(python --version)" in - *3.9*) - # Numba isn't available for Python 3.9 except on the numba dev channel and building from source fails - # See https://github.com/librosa/librosa/issues/1270#issuecomment-759065048 - NUMBA_DEV_CHANNEL="-c numba/label/dev" - ;; - *3.10*) - # Don't install sentencepiece, no python 3.10 dependencies available for windows yet - SENTENCEPIECE_DEPENDENCY="" - NUMBA_DEV_CHANNEL="-c numba/label/dev" - ;; -esac -# Note: installing librosa via pip fail because it will try to compile numba. 
-( - set -x - conda install -y -c conda-forge ${NUMBA_DEV_CHANNEL} 'librosa>=0.8.0' parameterized 'requests>=2.20' - # Need to disable shell check since this'll fail out if SENTENCEPIECE_DEPENDENCY is empty - # shellcheck disable=SC2086 - pip install \ - ${SENTENCEPIECE_DEPENDENCY} \ - Pillow \ - SoundFile \ - coverage \ - expecttest \ - inflect \ - kaldi-io \ - pytest \ - pytest-cov \ - pytorch-lightning \ - 'scipy==1.7.3' \ - transformers \ - unidecode \ - 'protobuf<4.21.0' \ - demucs \ - tinytag -) -# Install fairseq -git clone https://github.com/pytorch/fairseq -cd fairseq -git checkout e47a4c8 -pip install . diff --git a/.circleci/unittest/windows/scripts/install_conda.bat b/.circleci/unittest/windows/scripts/install_conda.bat deleted file mode 100644 index 6052ad08..00000000 --- a/.circleci/unittest/windows/scripts/install_conda.bat +++ /dev/null @@ -1 +0,0 @@ -start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda% diff --git a/.circleci/unittest/windows/scripts/run_test.sh b/.circleci/unittest/windows/scripts/run_test.sh deleted file mode 100644 index 22a53911..00000000 --- a/.circleci/unittest/windows/scripts/run_test.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" -conda activate ./env - -this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -source "$this_dir/set_cuda_envs.sh" - -python -m torch.utils.collect_env -env | grep TORCHAUDIO || true - -cd test -pytest --cov=torchaudio --junitxml=../test-results/junit.xml -v --durations 20 torchaudio_unittest -coverage html diff --git a/.circleci/unittest/windows/scripts/set_cuda_envs.sh b/.circleci/unittest/windows/scripts/set_cuda_envs.sh deleted file mode 100644 index 37b53d02..00000000 --- a/.circleci/unittest/windows/scripts/set_cuda_envs.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -set -ex - -echo CU_VERSION is "${CU_VERSION}" -echo CUDA_VERSION 
is "${CUDA_VERSION}" - -# Currenly, CU_VERSION and CUDA_VERSION are not consistent. -# to understand this code, please checck out https://github.com/pytorch/vision/issues/4443 -version="cpu" -if [[ ! -z "${CUDA_VERSION}" ]] ; then - version="$CUDA_VERSION" -else - if [[ ${#CU_VERSION} -eq 5 ]]; then - version="${CU_VERSION:2:2}.${CU_VERSION:4:1}" - fi -fi - -# Don't use if [[ "$version" == "cpu" ]]; then exit 0 fi. -# It would exit the shell. One result is cpu tests would not run if the shell exit. -# Unless there's an error, Don't exit. -if [[ "$version" != "cpu" ]]; then - # set cuda envs - export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${version}/bin:/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${version}/libnvvp:$PATH" - export CUDA_PATH_V${version/./_}="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${version}" - export CUDA_PATH="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${version}" - - if [ ! -d "$CUDA_PATH" ] - then - echo "$CUDA_PATH" does not exist - exit 1 - fi - - # check cuda driver version - for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do - if [[ -x "$path" ]]; then - "$path" || echo "true"; - break - fi - done - - which nvcc - nvcc --version - env | grep CUDA -fi diff --git a/.circleci/unittest/windows/scripts/setup_env.sh b/.circleci/unittest/windows/scripts/setup_env.sh deleted file mode 100644 index 7555edc0..00000000 --- a/.circleci/unittest/windows/scripts/setup_env.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -# This script is for setting up environment in which unit test is ran. -# To speed up the CI time, the resulting environment is cached. -# -# Do not install PyTorch and torchaudio here, otherwise they also get cached. 
- -set -e - -this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -root_dir="$(git rev-parse --show-toplevel)" -conda_dir="${root_dir}/conda" -env_dir="${root_dir}/env" - -cd "${root_dir}" - -# 1. Install conda at ./conda -if [ ! -d "${conda_dir}" ]; then - printf "* Installing conda\n" - export tmp_conda="$(echo $conda_dir | tr '/' '\\')" - export miniconda_exe="$(echo $root_dir | tr '/' '\\')\\miniconda.exe" - curl --silent --output miniconda.exe https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe -O - "$this_dir/install_conda.bat" - unset tmp_conda - unset miniconda_exe -fi -eval "$("${conda_dir}/Scripts/conda.exe" 'shell.bash' 'hook')" - -# 2. Create test environment at ./env -if [ ! -d "${env_dir}" ]; then - printf "* Creating a test environment with PYTHON_VERSION=%s\n" "${PYTHON_VERSION}" - conda create --prefix "${env_dir}" -y python="${PYTHON_VERSION}" -fi -conda activate "${env_dir}" - -# 3. Install minimal build tools -pip --quiet install cmake ninja -conda install --quiet -y 'ffmpeg>=4.1' diff --git a/.flake8 b/.flake8 index b4e3878d..dae85fef 100644 --- a/.flake8 +++ b/.flake8 @@ -1,4 +1,12 @@ [flake8] +# Note: it's recommended to use `pre-commit run -a flake8` + max-line-length = 120 -ignore = E203,E305,E402,E721,E741,F405,W503,W504,F999 -exclude = build,docs/source,_ext,third_party,examples/tutorials +ignore = E203,E402,E741,W503 + +# Note: exclude is not honnored when flake8 is executed from pre-commit. 
+# pre-commit has a separate config +exclude = build,docs/src,third_party + +per-file-ignores = + examples/tutorials/*.py: E501 diff --git a/.gitignore b/.gitignore index 4c0cd995..4a4c401d 100644 --- a/.gitignore +++ b/.gitignore @@ -69,6 +69,7 @@ instance/ # Sphinx documentation docs/_build/ docs/src/ +docs/source/cpp docs/source/tutorials docs/source/gen_images docs/source/gen_modules diff --git a/.gitmodules b/.gitmodules index 05344907..e69de29b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,10 +0,0 @@ -[submodule "kaldi"] - path = third_party/kaldi/submodule - url = https://github.com/kaldi-asr/kaldi - ignore = dirty -[submodule "third_party/kenlm/submodule"] - path = third_party/kenlm/kenlm - url = https://github.com/kpu/kenlm -[submodule "flashlight-text"] - path = third_party/flashlight-text/submodule - url = https://github.com/flashlight/text diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cf9a6d52..d8c4096a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,6 @@ +default_language_version: + node: 16.14.2 + repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.0.1 @@ -16,4 +19,28 @@ repos: - black == 22.3 - usort == 1.0.2 - libcst == 0.4.1 - exclude: examples + + - repo: https://github.com/pre-commit/mirrors-clang-format + rev: v11.0.1 + hooks: + - id: clang-format + + - repo: https://github.com/pycqa/flake8 + rev: 4.0.1 + hooks: + - id: flake8 + args: ['torchaudio', 'test', 'tools', 'docs/source/conf.py', 'examples'] + exclude: 'build|docs/src|third_party' + additional_dependencies: + - flake8-breakpoint == 1.1.0 + - flake8-bugbear == 22.6.22 + - flake8-comprehensions == 3.10.0 + - flake8-pyi == 22.5.1 + - mccabe == 0.6.0 + - pycodestyle == 2.8.0 + + - repo: https://github.com/pycqa/pydocstyle + rev: 6.3.0 + hooks: + - id: pydocstyle + exclude: 'build|test|examples|third_party|docs|tools' diff --git a/CITATION b/CITATION index 094c20ab..95b8d558 100644 --- a/CITATION +++ b/CITATION @@ -1,6 
+1,8 @@ -@article{yang2021torchaudio, - title={TorchAudio: Building Blocks for Audio and Speech Processing}, - author={Yao-Yuan Yang and Moto Hira and Zhaoheng Ni and Anjali Chourdia and Artyom Astafurov and Caroline Chen and Ching-Feng Yeh and Christian Puhrsch and David Pollack and Dmitriy Genzel and Donny Greenberg and Edward Z. Yang and Jason Lian and Jay Mahadeokar and Jeff Hwang and Ji Chen and Peter Goldsborough and Prabhat Roy and Sean Narenthiran and Shinji Watanabe and Soumith Chintala and Vincent Quenneville-Bélair and Yangyang Shi}, - journal={arXiv preprint arXiv:2110.15018}, - year={2021} +@misc{hwang2023torchaudio, + title={TorchAudio 2.1: Advancing speech recognition, self-supervised learning, and audio processing components for PyTorch}, + author={Jeff Hwang and Moto Hira and Caroline Chen and Xiaohui Zhang and Zhaoheng Ni and Guangzhi Sun and Pingchuan Ma and Ruizhe Huang and Vineel Pratap and Yuekai Zhang and Anurag Kumar and Chin-Yun Yu and Chuang Zhu and Chunxi Liu and Jacob Kahn and Mirco Ravanelli and Peng Sun and Shinji Watanabe and Yangyang Shi and Yumeng Tao and Robin Scheibler and Samuele Cornell and Sean Kim and Stavros Petridis}, + year={2023}, + eprint={2310.17864}, + archivePrefix={arXiv}, + primaryClass={eess.AS} } diff --git a/CMakeLists.txt b/CMakeLists.txt index 0bbe18a3..f0195c87 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,28 +16,23 @@ if(NOT CMAKE_VERSION VERSION_LESS 3.15.0) cmake_policy(SET CMP0092 NEW) endif() +# Suppress warning about ExternalProject_Add timestamp +if(NOT CMAKE_VERSION VERSION_LESS 3.24.0) + cmake_policy(SET CMP0135 OLD) +endif() + project(torchaudio) -set (CMAKE_C_COMPILER "hipcc") -set (CMAKE_CXX_COMPILER "hipcc") + # check and set CMAKE_CXX_STANDARD string(FIND "${CMAKE_CXX_FLAGS}" "-std=c++" env_cxx_standard) if(env_cxx_standard GREATER -1) message( WARNING "C++ standard version definition detected in environment variable." - "PyTorch requires -std=c++14. 
Please remove -std=c++ settings in your environment.") -endif() - -set(CMAKE_CXX_STANDARD 14) -set(CMAKE_C_STANDARD 11) - -# https://developercommunity.visualstudio.com/t/VS-16100-isnt-compatible-with-CUDA-11/1433342 -if(MSVC) - if(USE_CUDA) - set(CMAKE_CXX_STANDARD 17) - endif() + "PyTorch requires -std=c++17. Please remove -std=c++ settings in your environment.") endif() - +set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ standard whose features are requested to build this target.") +set(CMAKE_C_STANDARD 11 CACHE STRING "The C standard whose features are requested to build this target.") set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_POSITION_INDEPENDENT_CODE ON) @@ -58,9 +53,10 @@ endif() # Options option(BUILD_SOX "Build libsox statically" ON) -option(BUILD_KALDI "Build kaldi statically" ON) +option(BUILD_RIR "Enable RIR simulation" ON) option(BUILD_RNNT "Enable RNN transducer" ON) -option(BUILD_CTC_DECODER "Build Flashlight CTC decoder" ON) +option(BUILD_ALIGN "Enable forced alignment" ON) +option(BUILD_CUDA_CTC_DECODER "Build CUCTC decoder" OFF) option(BUILD_TORCHAUDIO_PYTHON_EXTENSION "Build Python extension" OFF) option(USE_FFMPEG "Enable ffmpeg-based features" OFF) option(USE_CUDA "Enable CUDA support" OFF) @@ -84,6 +80,14 @@ endif() if(USE_CUDA) enable_language(CUDA) + set( + CMAKE_CUDA_FLAGS + "${CMAKE_CUDA_FLAGS} \ + -DCUDA_HAS_FP16=1 \ + -D__CUDA_NO_HALF_OPERATORS__ \ + -D__CUDA_NO_HALF_CONVERSIONS__ \ + -D__CUDA_NO_HALF2_OPERATORS__" + ) endif() include(cmake/TorchAudioHelper.cmake) @@ -112,7 +116,16 @@ if(MSVC) unsigned_compare_with_zero declared_but_not_referenced bad_friend_decl) - string(APPEND CMAKE_CUDA_FLAGS " -Xcudafe --diag_suppress=${diag}") + string( + APPEND + CMAKE_CUDA_FLAGS + " -Xcudafe \ + --diag_suppress=${diag} \ + -DCUDA_HAS_FP16=1 \ + -D__CUDA_NO_HALF_OPERATORS__ \ + -D__CUDA_NO_HALF_CONVERSIONS__ \ + -D__CUDA_NO_HALF2_OPERATORS__" + ) endforeach() CUDA_CONVERT_FLAGS(torch_cpu) if(TARGET torch_cuda) @@ -134,10 +147,41 @@ endif() # 
TORCH_CXX_FLAGS contains the same -D_GLIBCXX_USE_CXX11_ABI value as PyTorch if (MSVC) set(warning_flags /W4) + # the following line is added in order to export symbols when building on Windows + # this approach has some limitations as documented in https://github.com/pytorch/pytorch/pull/3650 + set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) else() set(warning_flags -Wall) endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${warning_flags} ${TORCH_CXX_FLAGS}") -add_subdirectory(third_party) +find_program(CCACHE_PROGRAM ccache) +if(CCACHE_PROGRAM) + message(STATUS "Found ccache") + set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PROGRAM}") + set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}") + set(CMAKE_CUDA_COMPILER_LAUNCHER "${CCACHE_PROGRAM}") +else() + message(STATUS "Could not find ccache. Consider installing ccache to speed up compilation.") +endif() + add_subdirectory(torchaudio/csrc) +if (BUILD_SOX) + add_subdirectory(third_party/sox) + add_subdirectory(torchaudio/csrc/sox) +endif() +if (USE_FFMPEG) + if (DEFINED ENV{FFMPEG_ROOT}) + add_subdirectory(third_party/ffmpeg/single) + else() + message(STATUS "Building FFmpeg integration with multi version support") + add_subdirectory(third_party/ffmpeg/multi) + endif() + add_subdirectory(torchaudio/csrc/ffmpeg) +endif() +if (BUILD_CUDA_CTC_DECODER) + if (NOT USE_CUDA) + message(FATAL "BUILD_CUDA_CTC_DECODER=1 but USE_CUDA=0.") + endif() + add_subdirectory(torchaudio/csrc/cuctc) +endif() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c11d37de..071cb698 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,13 +49,11 @@ torchaudio. 
conda install pytorch -c pytorch-nightly ``` -### Install build dependencies +### Install build/runtime dependencies ```bash # Install build-time dependencies pip install cmake ninja -# [optional for sox] -conda install pkg-config # [optional for ffmpeg] conda install ffmpeg ``` @@ -74,9 +72,8 @@ python setup.py develop Some environmnet variables that change the build behavior - `BUILD_SOX`: Deteremines whether build and bind libsox in non-Windows environments. (no effect in Windows as libsox integration is not available) Default value is 1 (build and bind). Use 0 for disabling it. - `USE_CUDA`: Determines whether build the custom CUDA kernel. Default to the availability of CUDA-compatible GPUs. -- `BUILD_KALDI`: Determines whether build Kaldi extension. This is required for `kaldi_pitch` function. Default value is 1 on Linux/macOS and 0 on Windows. - `BUILD_RNNT`: Determines whether build RNN-T loss function. Default value is 1. -- `BUILD_CTC_DECODER`: Determines whether build decoder features based on KenLM and FlashLight CTC decoder. Default value is 1. +- `BUILD_CUDA_CTC_DECODER`: Determines whether build decoder features based on CUDA CTC decoder. Default value is 1. (`USE_CUDA` has to be 1.) Please check the [./tools/setup_helpers/extension.py](./tools/setup_helpers/extension.py) for the up-to-date detail. diff --git a/README.md b/README.md index 07634fc0..532725ad 100644 --- a/README.md +++ b/README.md @@ -1,68 +1,93 @@ -# TORCHAUDIO - -## 介绍 - -[Introduction](README_ORIGIN.md) - -## 安装 - -### System Requirements - -- Linux. 
- -- Python 3.7, 3.8, 3.9 - -- (**推荐**) Upgrade pip - - ``` - python3 -m pip install --upgrade pip #--user - ``` - -### 使用pip安装(以0.13.1版本为例) -可以在光合[光合开发者社区](https://developer.hpccube.com/tool/#sdk) AI 生态包中获取最新的 torchaudio Release 版本(需对应 DCU Toolkit 版本与 python 版本) -```bash -python3 -m pip install torchaudio-0.13.1+gitd946a7c.abi0.dtk2304-cp37-cp37m-linux_x86_64.whl -``` - -### 使用源码安装 - -#### 编译环境准备(以0.13.1版本为例) - -- 拉取 torchaudio 代码 - - ``` - git clone -b 0.13.1-dtk23.04 http://developer.hpccube.com/codes/aicomponent/torchaudio.git - ``` - -- 在[开发者社区](https://developer.hpccube.com/tool/#sdk) DCU Toolkit 中下载 DTK-23.04 解压至 /opt/ 路径下,并建立软链接 - - ``` - cd /opt && ln -s dtk-23.04 dtk - ``` - -- 在光合[光合开发者社区](https://developer.hpccube.com/tool/#sdk) AI 生态包中获取对应的 pytorch Release 版本(需对应 DCU Toolkit 版本与 python 版本) -```bash -python3 -m pip install torch-1.13.1a0+git4c8a1fe.abi0.dtk2304-cp37-cp37m-linux_x86_64.whl -``` - -- 导入环境变量以及安装必要依赖库 - - ```bash - source /opt/dtk/env.sh - - export PYTORCH_ROCM_ARCH="gfx906;gfx926" - - MAX_JOBS=16 - pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn - ``` - - -#### 编译安装 - -- 执行编译命令 - ```shell - cd audio - CXX=hipcc CC=hipcc python3 setup.py bdist_wheel - pip install dist/torchaudio* - ``` - +#
TorchAudio
+ +## 简介 + +torchaudio 的目标是将 PyTorch 应用于音频领域。通过支持 PyTorch,torchaudio 遵循了相同的理念,即提供强大的 GPU 加速,注重通过 autograd 系统实现可训练的特性,并保持一致的风格(张量命名和维度命名)。因此,它主要是一个机器学习库,而不是一个通用的信号处理库。PyTorch 的优势在 torchaudio 中得以体现,所有计算都通过 PyTorch 操作完成,这使得它易于使用,并且像 PyTorch 的自然扩展。torchaudio官方github地址:[GitHub - pytorch/audio: Data manipulation and transformation for audio signal processing, powered by PyTorch](https://github.com/pytorch/audio) + +- 支持音频输入输出(加载文件,保存文件) + + - 使用 SoX 将各种音频格式(如 wav、mp3、ogg、flac、opus、sphere)加载到 PyTorch 的张量中。 + + - 支持 Kaldi(ark/scp)格式。 + +- 数据加载器 + + - 提供常见音频数据集的数据加载器。 + +- 音频与语音处理功能 + + - 强制对齐(forced_align)。 + +- 常用音频变换 + + - 提供如频谱图、AmplitudeToDB、MelScale、MelSpectrogram、MFCC、MuLaw 编码与解码、重采样等常用的音频变换。 + +- 兼容性接口 + + - 通过 PyTorch 运行与其他库(如 Kaldi)对齐的代码,包括频谱图、fbank、MFCC 等功能。 + +## 安装 + +### 适用环境 + +- ubuntu20.04 或 rocky8.6 + +- Python==3.10 + +- PyTorch==2.3.0 DTK>=24.04 + +### 使用pip方式安装 + +torchaudio whl包下载目录:[torchaudio-2.1.2+das.opt1.dtk24042-cp310-cp310-linux_x86_64.whl](http://10.6.10.68:8000/debug/torchaudio/24.04/) + +```shell +pip install torchaudio* # (请下载对应操作系统的torchaudio的whl包) +``` + +### 源码编译安装 + +#### 编译环境准备 + +- 拉取torchaudio代码 + +```shell +git clone -b 2.1.2+das.opt1.dtk24042 http://developer.hpccube.com/codes/aicomponent/torchaudio.git +``` + +- 导入环境变量以及安装必要依赖库 + +```shell +source /opt/dtk/env.sh +cd audio +bash ./test/tools/make_test_env.sh # ubuntu +bash ./test/tools/rocky8_make_test_env.sh #rocky +export PYTORCH_ROCM_ARCH="gfx928" +MAX_JOBS=16 +``` + +#### 编译安装 + +- 执行编译命令并安装 + +```shell +CMAKE_CXX_FLAGS=-std=c++17 USE_FFMPEG=1 USE_ROCM=1 python3 setup.py bdist_wheel +pip install dist/torchaudio* +``` + +## 版本号查询 + +```shell +python -c "import torchaudio; print(torchaudio.__version__)" +``` + +- 版本号与官方版本同步,查询该软件的版本号,例如2.1.2; + +## Known Issue + +- 无 + +## 其他参考 + +- [README_ORIGIN](README_ORIGIN.md) + +- [GitHub - pytorch/audio](https://github.com/pytorch/audio) diff --git a/README_ORIGIN.md b/README_ORIGIN.md index 7303e98e..6494094e 100644 --- 
a/README_ORIGIN.md +++ b/README_ORIGIN.md @@ -1,7 +1,6 @@ torchaudio: an audio library for PyTorch ======================================== -[![Build Status](https://circleci.com/gh/pytorch/audio.svg?style=svg)](https://app.circleci.com/pipelines/github/pytorch/audio) [![Documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchaudio%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/audio/main/) [![Anaconda Badge](https://anaconda.org/pytorch/torchaudio/badges/downloads.svg)](https://anaconda.org/pytorch/torchaudio) [![Anaconda-Server Badge](https://anaconda.org/pytorch/torchaudio/badges/platforms.svg)](https://anaconda.org/pytorch/torchaudio) @@ -21,112 +20,18 @@ to use and feel like a natural extension. - Load a variety of audio formats, such as `wav`, `mp3`, `ogg`, `flac`, `opus`, `sphere`, into a torch Tensor using SoX - [Kaldi (ark/scp)](http://pytorch.org/audio/main/kaldi_io.html) - [Dataloaders for common audio datasets](http://pytorch.org/audio/main/datasets.html) +- Audio and speech processing functions + - [forced_align](https://pytorch.org/audio/main/generated/torchaudio.functional.forced_align.html) - Common audio transforms - - [Spectrogram, AmplitudeToDB, MelScale, MelSpectrogram, MFCC, MuLawEncoding, MuLawDecoding, Resample](http://pytorch.org/audio/main/transforms.html) + - [Spectrogram, AmplitudeToDB, MelScale, MelSpectrogram, MFCC, MuLawEncoding, MuLawDecoding, Resample](http://pytorch.org/audio/main/transforms.html) - Compliance interfaces: Run code using PyTorch that align with other libraries - - [Kaldi: spectrogram, fbank, mfcc](https://pytorch.org/audio/main/compliance.kaldi.html) - -Dependencies ------------- -* PyTorch (See below for the compatible versions) -* [optional] vesis84/kaldi-io-for-python commit cb46cb1f44318a5d04d4941cf39084c5b021241e or above - -The following are the corresponding ``torchaudio`` versions and supported Python versions. 
- -| | ``torch`` | ``torchaudio`` | ``python`` | -| ----------- | ------------------------ | ------------------------ | ------------------------------- | -| Development | ``master`` / ``nightly`` | ``main`` / ``nightly`` | ``>=3.7``, ``<=3.10`` | -| Latest versioned release | ``1.12.1`` | ``0.12.1`` | ``>=3.7``, ``<=3.10`` | -| LTS | ``1.8.2`` | ``0.8.2`` | ``>=3.6``, ``<=3.9`` | - -
Previous versions - -| ``torch`` | ``torchaudio`` | ``python`` | -| ------------------------ | ------------------------ | ------------------------------- | -| ``1.12.0`` | ``0.12.0`` | ``>=3.7``, ``<=3.10`` | -| ``1.11.0`` | ``0.11.0`` | ``>=3.7``, ``<=3.9`` | -| ``1.10.0`` | ``0.10.0`` | ``>=3.6``, ``<=3.9`` | -| ``1.9.1`` | ``0.9.1`` | ``>=3.6``, ``<=3.9`` | -| ``1.9.0`` | ``0.9.0`` | ``>=3.6``, ``<=3.9`` | -| ``1.8.2`` | ``0.8.2`` | ``>=3.6``, ``<=3.9`` | -| ``1.8.0`` | ``0.8.0`` | ``>=3.6``, ``<=3.9`` | -| ``1.7.1`` | ``0.7.2`` | ``>=3.6``, ``<=3.9`` | -| ``1.7.0`` | ``0.7.0`` | ``>=3.6``, ``<=3.8`` | -| ``1.6.0`` | ``0.6.0`` | ``>=3.6``, ``<=3.8`` | -| ``1.5.0`` | ``0.5.0`` | ``>=3.5``, ``<=3.8`` | -| ``1.4.0`` | ``0.4.0`` | ``==2.7``, ``>=3.5``, ``<=3.8`` | - -
+ - [Kaldi: spectrogram, fbank, mfcc](https://pytorch.org/audio/main/compliance.kaldi.html) Installation ------------ -### Binary Distributions - -`torchaudio` has binary distributions for PyPI (`pip`) and Anaconda (`conda`). - -Please refer to https://pytorch.org/get-started/locally/ for the details. - -**Note** Starting `0.10`, torchaudio has CPU-only and CUDA-enabled binary distributions, each of which requires a matching PyTorch version. - -**Note** LTS versions are distributed through a different channel than the other versioned releases. Please refer to the above page for details. - -**Note** This software was compiled against an unmodified copy of FFmpeg (licensed under [the LGPLv2.1](https://github.com/FFmpeg/FFmpeg/blob/a5d2008e2a2360d351798e9abe883d603e231442/COPYING.LGPLv2.1)), with the specific rpath removed so as to enable the use of system libraries. The LGPL source can be downloaded [here](https://github.com/FFmpeg/FFmpeg/releases/tag/n4.1.8). - -### From Source +Please refer to https://pytorch.org/audio/main/installation.html for installation and build process of TorchAudio. -On non-Windows platforms, the build process builds libsox and codecs that torchaudio need to link to. It will fetch and build libmad, lame, flac, vorbis, opus, and libsox before building extension. This process requires `cmake` and `pkg-config`. libsox-based features can be disabled with `BUILD_SOX=0`. -The build process also builds the RNN transducer loss and CTC beam search decoder. These functionalities can be disabled by setting the environment variable `BUILD_RNNT=0` and `BUILD_CTC_DECODER=0`, respectively. - -```bash -# Linux -python setup.py install - -# OSX -CC=clang CXX=clang++ python setup.py install - -# Windows -# We need to use the MSVC x64 toolset for compilation, with Visual Studio's vcvarsall.bat or directly with vcvars64.bat. -# These batch files are under Visual Studio's installation folder, under 'VC\Auxiliary\Build\'. 
-# More information available at: -# https://docs.microsoft.com/en-us/cpp/build/how-to-enable-a-64-bit-visual-cpp-toolset-on-the-command-line?view=msvc-160#use-vcvarsallbat-to-set-a-64-bit-hosted-build-architecture -call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" x64 && set BUILD_SOX=0 && python setup.py install -# or -call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat" && set BUILD_SOX=0 && python setup.py install -``` - -This is known to work on linux and unix distributions such as Ubuntu and CentOS 7 and macOS. -If you try this on a new system and find a solution to make it work, feel free to share it by opening an issue. - -Quick Usage ------------ - -```python -import torchaudio - -waveform, sample_rate = torchaudio.load('foo.wav') # load tensor from file -torchaudio.save('foo_save.wav', waveform, sample_rate) # save tensor to file -``` - -Backend Dispatch ----------------- - -By default in OSX and Linux, torchaudio uses SoX as a backend to load and save files. -The backend can be changed to [SoundFile](https://pysoundfile.readthedocs.io/en/latest/) -using the following. See [SoundFile](https://pysoundfile.readthedocs.io/en/latest/) -for installation instructions. - -```python -import torchaudio -torchaudio.set_audio_backend("soundfile") # switch backend - -waveform, sample_rate = torchaudio.load('foo.wav') # load tensor from file, as usual -torchaudio.save('foo_save.wav', waveform, sample_rate) # save tensor to file, as usual -``` - -**Note** -- SoundFile currently does not support mp3. -- "soundfile" backend is not supported by TorchScript. 
API Reference ------------- @@ -152,9 +57,29 @@ If you find this package useful, please cite as: } ``` +```bibtex +@misc{hwang2023torchaudio, + title={TorchAudio 2.1: Advancing speech recognition, self-supervised learning, and audio processing components for PyTorch}, + author={Jeff Hwang and Moto Hira and Caroline Chen and Xiaohui Zhang and Zhaoheng Ni and Guangzhi Sun and Pingchuan Ma and Ruizhe Huang and Vineel Pratap and Yuekai Zhang and Anurag Kumar and Chin-Yun Yu and Chuang Zhu and Chunxi Liu and Jacob Kahn and Mirco Ravanelli and Peng Sun and Shinji Watanabe and Yangyang Shi and Yumeng Tao and Robin Scheibler and Samuele Cornell and Sean Kim and Stavros Petridis}, + year={2023}, + eprint={2310.17864}, + archivePrefix={arXiv}, + primaryClass={eess.AS} +} +``` + Disclaimer on Datasets ---------------------- This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license. If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community! + +Pre-trained Model License +------------------------- + +The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case. + +For instance, SquimSubjective model is released under the Creative Commons Attribution Non Commercial 4.0 International (CC-BY-NC 4.0) license. See [the link](https://zenodo.org/record/4660670#.ZBtWPOxuerN) for additional details. 
+ +Other pre-trained models that have different license are noted in documentation. Please checkout the [documentation page](https://pytorch.org/audio/main/). diff --git a/cmake/TorchAudioHelper.cmake b/cmake/TorchAudioHelper.cmake index 91a9394e..acbbef45 100644 --- a/cmake/TorchAudioHelper.cmake +++ b/cmake/TorchAudioHelper.cmake @@ -1,8 +1,22 @@ find_package(Torch REQUIRED) +# Remove stray mkl dependency found in Intel mac. +# +# For Intel mac, torch_cpu has caffe2::mkl, which adds link flags like +# -lmkl_intel_ilp64, -lmkl_core and -lmkl_intel_thread. +# Even though TorchAudio does not call any of MKL functions directly, +# Apple's linker does not drop them, instead it bakes these dependencies +# Therefore, we remove it. +# See https://github.com/pytorch/audio/pull/3307 +get_target_property(dep torch_cpu INTERFACE_LINK_LIBRARIES) +if ("caffe2::mkl" IN_LIST dep) + list(REMOVE_ITEM dep "caffe2::mkl") + set_target_properties(torch_cpu PROPERTIES INTERFACE_LINK_LIBRARIES "${dep}") +endif() + function (torchaudio_library name source include_dirs link_libraries compile_defs) add_library(${name} SHARED ${source}) - target_include_directories(${name} PRIVATE ${include_dirs}) + target_include_directories(${name} PRIVATE "${PROJECT_SOURCE_DIR};${include_dirs}") target_link_libraries(${name} ${link_libraries}) target_compile_definitions(${name} PRIVATE ${compile_defs}) set_target_properties(${name} PROPERTIES PREFIX "") @@ -52,8 +66,21 @@ if (BUILD_TORCHAUDIO_PYTHON_EXTENSION) endif() install( TARGETS ${name} - LIBRARY DESTINATION . - RUNTIME DESTINATION . 
# For Windows + LIBRARY DESTINATION lib + RUNTIME DESTINATION lib # For Windows ) endfunction() endif() + + +if (USE_CUDA) + add_library(cuda_deps INTERFACE) + target_include_directories(cuda_deps INTERFACE ${CUDA_TOOLKIT_INCLUDE}) + target_compile_definitions(cuda_deps INTERFACE USE_CUDA) + target_link_libraries( + cuda_deps + INTERFACE + ${C10_CUDA_LIBRARY} + ${CUDA_CUDART_LIBRARY} + ) +endif() diff --git a/docs/Makefile b/docs/Makefile index 630a95dc..2df69cae 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -24,4 +24,12 @@ docset: html # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile + doxygen source/Doxyfile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + @python post_process_dispatcher.py $(BUILDDIR) + +clean: + rm -rf $(BUILDDIR)/* + rm -rf $(SOURCEDIR)/generated/ + rm -rf $(SOURCEDIR)/aen_images/ + rm -rf $(SOURCEDIR)/gen_modules/ diff --git a/docs/post_process_dispatcher.py b/docs/post_process_dispatcher.py new file mode 100644 index 00000000..53f94ad9 --- /dev/null +++ b/docs/post_process_dispatcher.py @@ -0,0 +1,16 @@ +"""Replaces every instance of 'torchaudio._backend' with 'torchaudio' in torchaudio.html. +Temporary hack while we maintain both the existing set of info/load/save functions and the +new ones backed by the backend dispatcher in torchaudio._backend. 
+""" +import sys + +if __name__ == "__main__": + build_dir = sys.argv[1] + filepath = f"{build_dir}/html/torchaudio.html" + + with open(filepath, "r") as f: + text = f.read() + text = text.replace("torchaudio._backend", "torchaudio") + + with open(filepath, "w") as f: + f.write(text) diff --git a/docs/requirements-tutorials.txt b/docs/requirements-tutorials.txt index c00ec02c..e125b374 100644 --- a/docs/requirements-tutorials.txt +++ b/docs/requirements-tutorials.txt @@ -3,7 +3,7 @@ deep-phonemizer boto3 cython pandas -librosa +librosa==0.10.0 sentencepiece pandoc mir_eval diff --git a/docs/requirements.txt b/docs/requirements.txt index 3239feea..1fc3d1e2 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,13 +1,16 @@ Jinja2<3.1.0 -matplotlib +matplotlib<=3.8 pyparsing<3,>=2.0.2 +# C++ docs +breathe==4.34.0 + # Note: # When changing Sphinx-related packages, make sure that the custom behaviors in the following # locations are working as expected. # - source/_templates/layout.html # - source/_static/css/custom.css --e git+https://github.com/pytorch/pytorch_sphinx_theme.git@cece053#egg=pytorch_sphinx_theme +-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@32a6550#egg=pytorch_sphinx_theme sphinx==5.1.1 sphinxcontrib.katex==0.8.6 sphinxcontrib.bibtex==2.4.2 diff --git a/docs/source/Doxyfile b/docs/source/Doxyfile new file mode 100644 index 00000000..4235c71c --- /dev/null +++ b/docs/source/Doxyfile @@ -0,0 +1,2727 @@ +# Doxyfile 1.9.5 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). 
+# +# Note: +# +# Use doxygen to compare the used configuration file with the template +# configuration file: +# doxygen -x [configFile] +# Use doxygen to compare the used configuration file with the template +# configuration file without replacing the environment variables or CMake type +# replacement variables: +# doxygen -x_noenv [configFile] + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "libtorchaudio" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. 
The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = source/cpp + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096 +# sub-directories (in 2 levels) under the output directory of each output format +# and will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to +# control the number of sub-directories. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# Controls the number of sub-directories that will be created when +# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every +# level increment doubles the number of directories, resulting in 4096 +# directories at level 8 which is the default and also the maximum value. The +# sub-directories are organized in 2 levels, the first level always has a fixed +# numer of 16 directories. +# Minimum value: 0, maximum value: 8, default value: 8. +# This tag requires that the tag CREATE_SUBDIRS is set to YES. + +CREATE_SUBDIRS_LEVEL = 8 + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. 
+ +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian, +# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English +# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek, +# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with +# English messages), Korean, Korean-en (Korean with English messages), Latvian, +# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, +# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, +# Swedish, Turkish, Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. 
If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. 
+ +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! 
or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:^^" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". Note that you cannot put \n's in the value part of an alias +# to insert newlines (in the resulting output). 
You can put ^^ in the value part +# of an alias to insert a newline as if a physical newline was in the original +# file. When you need a literal { or } or , in the value part of an alias you +# have to escape them by means of a backslash (\), this can lead to conflicts +# with the commands \{ and \} for these it is advised to use the version @{ and +# @} or use a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. 
With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, +# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. 
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 5 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. 
+ +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. 
+ +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. 
Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. 
If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# Possible values are: SYSTEM, NO and YES. +# The default value is: SYSTEM. + +CASE_SENSE_NAMES = SYSTEM + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. 
+ +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. 
If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. 
This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. 
This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. See also section "Changing the +# layout of pages" for information. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. 
+ +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as documenting some parameters in +# a documented function twice, or documenting parameters that don't exist or +# using markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete +# function parameter documentation. If set to NO, doxygen will accept that some +# parameters have no documentation without warning. +# The default value is: YES. + +WARN_IF_INCOMPLETE_DOC = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong parameter +# documentation, but not about the absence of documentation. 
If EXTRACT_ALL is +# set to YES then this flag will automatically be disabled. See also +# WARN_IF_INCOMPLETE_DOC +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# See also: WARN_LINE_FORMAT +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# In the $text part of the WARN_FORMAT command it is possible that a reference +# to a more specific place is given. To make it easier to jump to this place +# (outside of doxygen) the user can define a custom "cut" / "paste" string. +# Example: +# WARN_LINE_FORMAT = "'vi $file +$line'" +# See also: WARN_FORMAT +# The default value is: at line $line of file $file. + +WARN_LINE_FORMAT = "at line $line of file $file" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). In case the file specified cannot be opened for writing the +# warning and error messages are written to standard error. When as file - is +# specified the warning and error messages are written to standard output +# (stdout). 
+ +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = ../torchaudio/csrc/ffmpeg/stream_reader/typedefs.h \ + ../torchaudio/csrc/ffmpeg/stream_reader/stream_reader.h \ + ../torchaudio/csrc/ffmpeg/stream_writer/stream_writer.h + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# See also: INPUT_FILE_ENCODING +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. The INPUT_FILE_ENCODING tag can be used to specify +# character encoding on a per file pattern basis. Doxygen will compare the file +# name with each pattern and apply the encoding (instead of the default +# INPUT_ENCODING) if there is a match. The character encodings are a list of the +# form: pattern=encoding (like *.php=ISO-8859-1). See cfg_input_encoding +# "INPUT_ENCODING" for further information on supported encodings. + +INPUT_FILE_ENCODING = + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. 
+# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, +# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C +# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.l \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f18 \ + *.f \ + *.for \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. 
+ +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# ANamespace::AClass, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. 
Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that doxygen will use the data processed and written to standard output +# for further processing, therefore nothing else, like debug statements or used +# commands (so in case of a Windows batch file always use @echo OFF), should be +# written to standard output. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. 
+ +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +# The Fortran standard specifies that for fixed formatted Fortran code all +# characters from position 72 are to be considered as comment. A common +# extension is to allow longer lines before the automatic comment starts. The +# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can +# be processed before the automatic comment starts. +# Minimum value: 7, maximum value: 10000, default value: 72. + +FORTRAN_COMMENT_AFTER = 72 + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. 
+ +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. 
+# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output
+# should be rendered with a dark or light theme. Default setting AUTO_LIGHT
+# enables light output unless the user preference is dark output. Other options
+# are DARK to always use dark mode, LIGHT to always use light mode, AUTO_DARK to
+# default to dark mode unless the user prefers light mode, and TOGGLE to let the
+# user toggle between dark and light mode via a button.
+# Possible values are: LIGHT Always generate light output., DARK Always generate
+# dark output., AUTO_LIGHT Automatically set the mode according to the user
+# preference, use light mode if no preference is set (the default)., AUTO_DARK
+# Automatically set the mode according to the user preference, use dark mode if
+# no preference is set. and TOGGLE Allow the user to switch between light and
+# dark mode via a button.
+# The default value is: AUTO_LIGHT.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE = AUTO_LIGHT
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a color-wheel, see
+# https://en.wikipedia.org/wiki/Hue for more information.
For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use gray-scales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. 
Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDURL = + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# on Windows. 
In the beginning of 2021 Microsoft took the original page, with
+# a.o. the download links, offline (the HTML help workshop was already many years
+# in maintenance mode). You can download the HTML help workshop from the web
+# archives at Installation executable (see:
+# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
+# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the main .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+ +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. 
For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. 
The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine tune the look of the index (see "Fine-tuning the output"). 
As an +# example, the default style sheet generated by doxygen has an example that +# shows how to put an image at the root of the tree instead of the PROJECT_NAME. +# Since the tree basically has the same information as the tab index, you could +# consider setting DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
+# addresses.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+OBFUSCATE_EMAILS = YES
+
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg or inkscape tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT = png
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
+# Note that the different versions of MathJax have different requirements with
+# regards to the different settings, so it is possible that also other MathJax
+# settings have to be changed when switching between the different MathJax
+# versions.
+# Possible values are: MathJax_2 and MathJax_3.
+# The default value is: MathJax_2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_VERSION = MathJax_2
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. For more details about the output format see MathJax
+# version 2 (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
+# (see:
+# http://docs.mathjax.org/en/latest/web/components/output.html).
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility. This is the name for Mathjax version 2, for MathJax version 3
+# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
+# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
+# is the name for Mathjax version 3, for MathJax version 2 this will be
+# translated into HTML-CSS) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax.
However, it is strongly recommended to install a local copy of +# MathJax from https://www.mathjax.org before deployment. The default value is: +# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 +# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# for MathJax version 2 (see +# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# For example for MathJax version 3 (see +# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): +# MATHJAX_EXTENSIONS = ams +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /