Commit 476ab9ab authored by moto, committed by Facebook GitHub Bot

Consolidate bibliography / reference (#2676)

Summary:
Preparation for the adoption of `autosummary`.

Replace `:footcite:` with `:cite:` and introduce a dedicated reference page, as `:footcite:` does not work well with `autosummary`.

Example:

https://output.circle-artifacts.com/output/job/4da47ba6-d9c7-418e-b5b0-e9f8a146a6c3/artifacts/0/docs/datasets.html#cmuarctic

https://output.circle-artifacts.com/output/job/4da47ba6-d9c7-418e-b5b0-e9f8a146a6c3/artifacts/0/docs/references.html
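For context, a minimal sketch of the pattern this change moves to (the citation key is one already used in the diff; the dedicated page's content is shown in the hunks below, and its name presumably matches the `references` toctree entry added to the index): any docstring or RST page cites a `refs.bib` entry with the `:cite:` role, and a single page gathers all cited entries via the `bibliography` directive of `sphinxcontrib-bibtex`:

    Create a Dataset for *CMU ARCTIC* :cite:`Kominek03cmuarctic`.

    References
    ----------

    .. bibliography::

With `bibtex_reference_style = "author_year"` set in `conf.py`, the `:cite:` role is expected to render an author-year label that links to the corresponding entry on that page.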

Pull Request resolved: https://github.com/pytorch/audio/pull/2676

Reviewed By: carolineechen

Differential Revision: D39509431

Pulled By: mthrok

fbshipit-source-id: e6003dd01ec3eff3d598054690f61de8ee31ac9a
parent 50c66721
@@ -72,6 +72,7 @@ delimiters : [
 """
 bibtex_bibfiles = ["refs.bib"]
+bibtex_reference_style = "author_year"
 def _get_var(var, default=False):
@@ -166,9 +166,3 @@ MUSDB_HQ
 .. autoclass:: MUSDB_HQ
   :members:
   :special-members: __getitem__
-References
-~~~~~~~~~~
-.. footbibliography::
@@ -291,8 +291,3 @@ edit_distance
 -------------
 .. autofunction:: edit_distance
-References
-~~~~~~~~~~
-.. footbibliography::
@@ -30,6 +30,7 @@ Features described in this documentation are classified by release status:
    Index <self>
    supported_features
+   references
 API References
 --------------
@@ -64,8 +64,3 @@ download_pretrained_files
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autoclass:: download_pretrained_files
-References
-----------
-.. footbibliography::
@@ -263,8 +263,3 @@ WaveRNN
   .. automethod:: forward
   .. automethod:: infer
-References
-~~~~~~~~~~
-.. footbibliography::
@@ -349,8 +349,3 @@ HDEMUCS_HIGH_MUSDB
 .. autodata:: HDEMUCS_HIGH_MUSDB
    :no-value:
-References
-----------
-.. footbibliography::
@@ -22,8 +22,3 @@ ConvEmformer
   .. automethod:: forward
   .. automethod:: infer
-References
-~~~~~~~~~~
-.. footbibliography::
@@ -24,8 +24,3 @@ EMFORMER_RNNT_BASE_TEDLIUM3
 .. autodata:: EMFORMER_RNNT_BASE_TEDLIUM3
    :no-value:
-References
-----------
-.. footbibliography::
+References
+----------
+.. bibliography::
@@ -217,8 +217,3 @@ Transforms are common audio transforms. They can be chained together using :clas
 .. autoclass:: SoudenMVDR
   .. automethod:: forward
-References
-~~~~~~~~~~
-.. footbibliography::
@@ -49,7 +49,7 @@ def load_cmuarctic_item(line: str, path: str, folder_audio: str, ext_audio: str)
 class CMUARCTIC(Dataset):
-    """Create a Dataset for *CMU ARCTIC* [:footcite:`Kominek03cmuarctic`].
+    """Create a Dataset for *CMU ARCTIC* :cite:`Kominek03cmuarctic`.
     Args:
         root (str or Path): Path to the directory where the dataset is found or downloaded.
@@ -104,7 +104,7 @@ def _parse_dictionary(lines: Iterable[str], exclude_punctuations: bool) -> List[
 class CMUDict(Dataset):
-    """Create a Dataset for *CMU Pronouncing Dictionary* [:footcite:`cmudict`] (CMUDict).
+    """Create a Dataset for *CMU Pronouncing Dictionary* :cite:`cmudict` (CMUDict).
     Args:
         root (str or Path): Path to the directory where the dataset is found or downloaded.
@@ -28,7 +28,7 @@ def load_commonvoice_item(
 class COMMONVOICE(Dataset):
-    """Create a Dataset for *CommonVoice* [:footcite:`ardila2020common`].
+    """Create a Dataset for *CommonVoice* :cite:`ardila2020common`.
     Args:
         root (str or Path): Path to the directory where the dataset is located.
@@ -14,7 +14,7 @@ _SUPPORTED_SUBSETS = {"train", "test"}
 class DR_VCTK(Dataset):
-    """Create a dataset for *Device Recorded VCTK (Small subset version)* [:footcite:`Sarfjoo2018DeviceRV`].
+    """Create a dataset for *Device Recorded VCTK (Small subset version)* :cite:`Sarfjoo2018DeviceRV`.
     Args:
         root (str or Path): Root directory where the dataset's top level directory is found.
@@ -8,7 +8,7 @@ from torch.utils.data import Dataset
 class FluentSpeechCommands(Dataset):
-    """Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
+    """Create *Fluent Speech Commands* :cite:`fluent` Dataset
     Args:
         root (str of Path): Path to the directory where the dataset is found.
@@ -996,7 +996,7 @@ def load_gtzan_item(fileid: str, path: str, ext_audio: str) -> Tuple[Tensor, str
 class GTZAN(Dataset):
-    """Create a Dataset for *GTZAN* [:footcite:`tzanetakis_essl_cook_2001`].
+    """Create a Dataset for *GTZAN* :cite:`tzanetakis_essl_cook_2001`.
     Note:
         Please see http://marsyas.info/downloads/datasets.html if you are planning to use
@@ -9,7 +9,7 @@ SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
 class LibriMix(Dataset):
-    r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
+    r"""Create the *LibriMix* :cite:`cosentino2020librimix` dataset.
     Args:
         root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
@@ -76,7 +76,7 @@ def _get_librispeech_metadata(
 class LIBRISPEECH(Dataset):
-    """Create a Dataset for *LibriSpeech* [:footcite:`7178964`].
+    """Create a Dataset for *LibriSpeech* :cite:`7178964`.
     Args:
         root (str or Path): Path to the directory where the dataset is found or downloaded.
@@ -63,7 +63,7 @@ def load_libritts_item(
 class LIBRITTS(Dataset):
-    """Create a Dataset for *LibriTTS* [:footcite:`Zen2019LibriTTSAC`].
+    """Create a Dataset for *LibriTTS* :cite:`Zen2019LibriTTSAC`.
     Args:
         root (str or Path): Path to the directory where the dataset is found or downloaded.