diff --git a/0.9.0/_modules/index.html b/0.9.0/_modules/index.html new file mode 100644 index 0000000000..7e2d69e056 --- /dev/null +++ b/0.9.0/_modules/index.html @@ -0,0 +1,649 @@
Overview: module code — Torchaudio 0.9.0a0+33b2469 documentation
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/backend/common.html b/0.9.0/_modules/torchaudio/backend/common.html new file mode 100644 index 0000000000..aab4bce816 --- /dev/null +++ b/0.9.0/_modules/torchaudio/backend/common.html @@ -0,0 +1,674 @@
torchaudio.backend.common — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.backend.common

+
[docs]class AudioMetaData: + """Return type of ``torchaudio.info`` function. + + This class is used by :ref:`"sox_io" backend<sox_io_backend>` and + :ref:`"soundfile" backend with the new interface<soundfile_backend>`. + + :ivar int sample_rate: Sample rate + :ivar int num_frames: The number of frames + :ivar int num_channels: The number of channels + :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats, + or when it cannot be accurately inferred. + :ivar str encoding: Audio encoding + The values encoding can take are one of the following: + + * ``PCM_S``: Signed integer linear PCM + * ``PCM_U``: Unsigned integer linear PCM + * ``PCM_F``: Floating point linear PCM + * ``FLAC``: Flac, Free Lossless Audio Codec + * ``ULAW``: Mu-law + * ``ALAW``: A-law + * ``MP3`` : MP3, MPEG-1 Audio Layer III + * ``VORBIS``: OGG Vorbis + * ``AMR_WB``: Adaptive Multi-Rate + * ``AMR_NB``: Adaptive Multi-Rate Wideband + * ``OPUS``: Opus + * ``UNKNOWN`` : None of above + """ + def __init__( + self, + sample_rate: int, + num_frames: int, + num_channels: int, + bits_per_sample: int, + encoding: str, + ): + self.sample_rate = sample_rate + self.num_frames = num_frames + self.num_channels = num_channels + self.bits_per_sample = bits_per_sample + self.encoding = encoding + + def __str__(self): + return ( + f"AudioMetaData(" + f"sample_rate={self.sample_rate}, " + f"num_frames={self.num_frames}, " + f"num_channels={self.num_channels}, " + f"bits_per_sample={self.bits_per_sample}, " + f"encoding={self.encoding}" + f")" + )
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/backend/soundfile_backend.html b/0.9.0/_modules/torchaudio/backend/soundfile_backend.html new file mode 100644 index 0000000000..51235d2bb1 --- /dev/null +++ b/0.9.0/_modules/torchaudio/backend/soundfile_backend.html @@ -0,0 +1,1054 @@
torchaudio.backend.soundfile_backend — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.backend.soundfile_backend

+"""The new soundfile backend which will become default in 0.8.0 onward"""
+from typing import Tuple, Optional
+import warnings
+
+import torch
+from torchaudio._internal import module_utils as _mod_utils
+from .common import AudioMetaData
+
+
+if _mod_utils.is_module_available("soundfile"):
+    import soundfile
+
+
+# Mapping from soundfile subtype to number of bits per sample.
+# This is mostly heuristical and the value is set to 0 when it is irrelevant
+# (lossy formats) or when it can't be inferred.
+# For ADPCM (and G72X) subtypes, it's hard to infer the bit depth because it's not part of the standard:
+# According to https://en.wikipedia.org/wiki/Adaptive_differential_pulse-code_modulation#In_telephony,
+# the default seems to be 8 bits but it can be compressed further to 4 bits.
+# The dict is inspired from
+# https://github.com/bastibe/python-soundfile/blob/744efb4b01abc72498a96b09115b42a4cabd85e4/soundfile.py#L66-L94
+_SUBTYPE_TO_BITS_PER_SAMPLE = {
+    'PCM_S8': 8,  # Signed 8 bit data
+    'PCM_16': 16,  # Signed 16 bit data
+    'PCM_24': 24,  # Signed 24 bit data
+    'PCM_32': 32,  # Signed 32 bit data
+    'PCM_U8': 8,  # Unsigned 8 bit data (WAV and RAW only)
+    'FLOAT': 32,  # 32 bit float data
+    'DOUBLE': 64,  # 64 bit float data
+    'ULAW': 8,  # U-Law encoded. See https://en.wikipedia.org/wiki/G.711#Types
+    'ALAW': 8,  # A-Law encoded. See https://en.wikipedia.org/wiki/G.711#Types
+    'IMA_ADPCM': 0,  # IMA ADPCM.
+    'MS_ADPCM': 0,  # Microsoft ADPCM.
+    'GSM610': 0,  # GSM 6.10 encoding. (Wikipedia says 1.625 bit depth?? https://en.wikipedia.org/wiki/Full_Rate)
+    'VOX_ADPCM': 0,  # OKI / Dialogix ADPCM
+    'G721_32': 0,  # 32kbs G721 ADPCM encoding.
+    'G723_24': 0,  # 24kbs G723 ADPCM encoding.
+    'G723_40': 0,  # 40kbs G723 ADPCM encoding.
+    'DWVW_12': 12,  # 12 bit Delta Width Variable Word encoding.
+    'DWVW_16': 16,  # 16 bit Delta Width Variable Word encoding.
+    'DWVW_24': 24,  # 24 bit Delta Width Variable Word encoding.
+    'DWVW_N': 0,  # N bit Delta Width Variable Word encoding.
+    'DPCM_8': 8,  # 8 bit differential PCM (XI only)
+    'DPCM_16': 16,  # 16 bit differential PCM (XI only)
+    'VORBIS': 0,  # Xiph Vorbis encoding. (lossy)
+    'ALAC_16': 16,  # Apple Lossless Audio Codec (16 bit).
+    'ALAC_20': 20,  # Apple Lossless Audio Codec (20 bit).
+    'ALAC_24': 24,  # Apple Lossless Audio Codec (24 bit).
+    'ALAC_32': 32,  # Apple Lossless Audio Codec (32 bit).
+}
+
+
+def _get_bit_depth(subtype):
+    if subtype not in _SUBTYPE_TO_BITS_PER_SAMPLE:
+        warnings.warn(
+            f"The {subtype} subtype is unknown to TorchAudio. As a result, the bits_per_sample "
+            "attribute will be set to 0. If you are seeing this warning, please "
+            "report by opening an issue on github (after checking for existing/closed ones). "
+            "You may otherwise ignore this warning."
+        )
+    return _SUBTYPE_TO_BITS_PER_SAMPLE.get(subtype, 0)
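As a quick illustration of the lookup above (written as if executed inside this module; the last subtype name is made up):

assert _get_bit_depth("PCM_24") == 24          # known subtype, taken from the table
assert _get_bit_depth("VORBIS") == 0           # lossy format, bit depth is irrelevant
assert _get_bit_depth("MADE_UP_SUBTYPE") == 0  # unknown subtype, falls back to 0 with a warning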
+
+
+_SUBTYPE_TO_ENCODING = {
+    'PCM_S8': 'PCM_S',
+    'PCM_16': 'PCM_S',
+    'PCM_24': 'PCM_S',
+    'PCM_32': 'PCM_S',
+    'PCM_U8': 'PCM_U',
+    'FLOAT': 'PCM_F',
+    'DOUBLE': 'PCM_F',
+    'ULAW': 'ULAW',
+    'ALAW': 'ALAW',
+    'VORBIS': 'VORBIS',
+}
+
+
+def _get_encoding(format: str, subtype: str):
+    if format == 'FLAC':
+        return 'FLAC'
+    return _SUBTYPE_TO_ENCODING.get(subtype, 'UNKNOWN')
+
+
+
[docs]@_mod_utils.requires_module("soundfile")
+def info(filepath: str, format: Optional[str] = None) -> AudioMetaData:
+    """Get signal information of an audio file.
+
+    Note:
+        ``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts
+        ``pathlib.Path`` object as well. This is for consistency with the ``"sox_io"`` backend,
+        which has a restriction on type annotation due to TorchScript compiler compatibility.
+
+    Args:
+        filepath (path-like object or file-like object):
+            Source of audio data.
+        format (str, optional):
+            Not used. PySoundFile does not accept a format hint.
+
+    Returns:
+        AudioMetaData: Metadata of the given audio.
+    """
+    sinfo = soundfile.info(filepath)
+    return AudioMetaData(
+        sinfo.samplerate,
+        sinfo.frames,
+        sinfo.channels,
+        bits_per_sample=_get_bit_depth(sinfo.subtype),
+        encoding=_get_encoding(sinfo.format, sinfo.subtype),
+    )
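A short usage sketch of the function above; the file name is hypothetical and the ``soundfile`` package must be installed.

from torchaudio.backend import soundfile_backend

meta = soundfile_backend.info("clip.flac")  # hypothetical file
print(meta.sample_rate, meta.num_channels, meta.encoding, meta.bits_per_sample)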
+ + +_SUBTYPE2DTYPE = { + "PCM_S8": "int8", + "PCM_U8": "uint8", + "PCM_16": "int16", + "PCM_32": "int32", + "FLOAT": "float32", + "DOUBLE": "float64", +} + + +
[docs]@_mod_utils.requires_module("soundfile")
+def load(
+    filepath: str,
+    frame_offset: int = 0,
+    num_frames: int = -1,
+    normalize: bool = True,
+    channels_first: bool = True,
+    format: Optional[str] = None,
+) -> Tuple[torch.Tensor, int]:
+    """Load audio data from file.
+
+    Note:
+        The formats this function can handle depend on the soundfile installation.
+        This function is tested on the following formats:
+
+        * WAV
+
+            * 32-bit floating-point
+            * 32-bit signed integer
+            * 16-bit signed integer
+            * 8-bit unsigned integer
+
+        * FLAC
+        * OGG/VORBIS
+        * SPHERE
+
+    By default (``normalize=True``, ``channels_first=True``), this function returns a Tensor with
+    ``float32`` dtype and the shape of ``[channel, time]``.
+    The samples are normalized to fit in the range of ``[-1.0, 1.0]``.
+
+    When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit
+    signed integer and 8-bit unsigned integer (24-bit signed integer is not supported),
+    by providing ``normalize=False``, this function can return an integer Tensor, where the samples
+    are expressed within the whole range of the corresponding dtype, that is, an ``int32`` tensor
+    for 32-bit signed PCM, ``int16`` for 16-bit signed PCM and ``uint8`` for 8-bit unsigned PCM.
+
+    The ``normalize`` parameter has no effect on 32-bit floating-point WAV and other formats, such as
+    ``flac`` and ``mp3``.
+    For these formats, this function always returns a ``float32`` Tensor with values normalized to
+    ``[-1.0, 1.0]``.
+
+    Note:
+        ``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts
+        ``pathlib.Path`` object as well. This is for consistency with the ``"sox_io"`` backend,
+        which has a restriction on type annotation due to TorchScript compiler compatibility.
+
+    Args:
+        filepath (path-like object or file-like object):
+            Source of audio data.
+        frame_offset (int):
+            Number of frames to skip before starting to read data.
+        num_frames (int):
+            Maximum number of frames to read. ``-1`` reads all the remaining samples,
+            starting from ``frame_offset``.
+            This function may return fewer frames if the given file does not contain
+            enough frames.
+        normalize (bool):
+            When ``True``, this function always returns ``float32``, and sample values are
+            normalized to ``[-1.0, 1.0]``.
+            If the input file is integer WAV, giving ``False`` will change the resulting Tensor type to
+            an integer type.
+            This argument has no effect for formats other than integer WAV type.
+        channels_first (bool):
+            When True, the returned Tensor has dimension ``[channel, time]``.
+            Otherwise, the returned Tensor's dimension is ``[time, channel]``.
+        format (str, optional):
+            Not used. PySoundFile does not accept a format hint.
+
+    Returns:
+        Tuple[torch.Tensor, int]: Resulting Tensor and sample rate.
+            If the input file has integer wav format and normalization is off, then it has
+            integer type, else ``float32`` type. If ``channels_first=True``, it has
+            ``[channel, time]`` else ``[time, channel]``.
+    """
+    with soundfile.SoundFile(filepath, "r") as file_:
+        if file_.format != "WAV" or normalize:
+            dtype = "float32"
+        elif file_.subtype not in _SUBTYPE2DTYPE:
+            raise ValueError(f"Unsupported subtype: {file_.subtype}")
+        else:
+            dtype = _SUBTYPE2DTYPE[file_.subtype]
+
+        frames = file_._prepare_read(frame_offset, None, num_frames)
+        waveform = file_.read(frames, dtype, always_2d=True)
+        sample_rate = file_.samplerate
+
+    waveform = torch.from_numpy(waveform)
+    if channels_first:
+        waveform = waveform.t()
+    return waveform, sample_rate
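A minimal sketch of the loading options described above, assuming a hypothetical 16-bit PCM WAV file ``speech.wav``:

from torchaudio.backend import soundfile_backend

# Default: float32 samples in [-1.0, 1.0] with shape [channel, time].
waveform, sample_rate = soundfile_backend.load("speech.wav")

# For an integer-PCM WAV file, normalize=False keeps the native integer dtype
# (int16 here), with values spanning the full dtype range.
raw, _ = soundfile_backend.load("speech.wav", normalize=False)

# channels_first=False returns [time, channel] instead.
tc, _ = soundfile_backend.load("speech.wav", channels_first=False)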
+ + +def _get_subtype_for_wav( + dtype: torch.dtype, + encoding: str, + bits_per_sample: int): + if not encoding: + if not bits_per_sample: + subtype = { + torch.uint8: "PCM_U8", + torch.int16: "PCM_16", + torch.int32: "PCM_32", + torch.float32: "FLOAT", + torch.float64: "DOUBLE", + }.get(dtype) + if not subtype: + raise ValueError(f"Unsupported dtype for wav: {dtype}") + return subtype + if bits_per_sample == 8: + return "PCM_U8" + return f"PCM_{bits_per_sample}" + if encoding == "PCM_S": + if not bits_per_sample: + return "PCM_32" + if bits_per_sample == 8: + raise ValueError("wav does not support 8-bit signed PCM encoding.") + return f"PCM_{bits_per_sample}" + if encoding == "PCM_U": + if bits_per_sample in (None, 8): + return "PCM_U8" + raise ValueError("wav only supports 8-bit unsigned PCM encoding.") + if encoding == "PCM_F": + if bits_per_sample in (None, 32): + return "FLOAT" + if bits_per_sample == 64: + return "DOUBLE" + raise ValueError("wav only supports 32/64-bit float PCM encoding.") + if encoding == "ULAW": + if bits_per_sample in (None, 8): + return "ULAW" + raise ValueError("wav only supports 8-bit mu-law encoding.") + if encoding == "ALAW": + if bits_per_sample in (None, 8): + return "ALAW" + raise ValueError("wav only supports 8-bit a-law encoding.") + raise ValueError(f"wav does not support {encoding}.") + + +def _get_subtype_for_sphere(encoding: str, bits_per_sample: int): + if encoding in (None, "PCM_S"): + return f"PCM_{bits_per_sample}" if bits_per_sample else "PCM_32" + if encoding in ("PCM_U", "PCM_F"): + raise ValueError(f"sph does not support {encoding} encoding.") + if encoding == "ULAW": + if bits_per_sample in (None, 8): + return "ULAW" + raise ValueError("sph only supports 8-bit for mu-law encoding.") + if encoding == "ALAW": + return "ALAW" + raise ValueError(f"sph does not support {encoding}.") + + +def _get_subtype( + dtype: torch.dtype, + format: str, + encoding: str, + bits_per_sample: int): + if format == "wav": + return _get_subtype_for_wav(dtype, encoding, bits_per_sample) + if format == "flac": + if encoding: + raise ValueError("flac does not support encoding.") + if not bits_per_sample: + return "PCM_24" + if bits_per_sample > 24: + raise ValueError("flac does not support bits_per_sample > 24.") + return "PCM_S8" if bits_per_sample == 8 else f"PCM_{bits_per_sample}" + if format in ("ogg", "vorbis"): + if encoding or bits_per_sample: + raise ValueError( + "ogg/vorbis does not support encoding/bits_per_sample.") + return "VORBIS" + if format == "sph": + return _get_subtype_for_sphere(encoding, bits_per_sample) + if format in ("nis", "nist"): + return "PCM_16" + raise ValueError(f"Unsupported format: {format}") + + +
[docs]@_mod_utils.requires_module("soundfile") +def save( + filepath: str, + src: torch.Tensor, + sample_rate: int, + channels_first: bool = True, + compression: Optional[float] = None, + format: Optional[str] = None, + encoding: Optional[str] = None, + bits_per_sample: Optional[int] = None, +): + """Save audio data to file. + + Note: + The formats this function can handle depend on the soundfile installation. + This function is tested on the following formats; + + * WAV + + * 32-bit floating-point + * 32-bit signed integer + * 16-bit signed integer + * 8-bit unsigned integer + + * FLAC + * OGG/VORBIS + * SPHERE + + Note: + ``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts + ``pathlib.Path`` object as well. This is for the consistency with ``"sox_io"`` backend, + which has a restriction on type annotation due to TorchScript compiler compatiblity. + + Args: + filepath (str or pathlib.Path): Path to audio file. + src (torch.Tensor): Audio data to save. must be 2D tensor. + sample_rate (int): sampling rate + channels_first (bool): If ``True``, the given tensor is interpreted as ``[channel, time]``, + otherwise ``[time, channel]``. + compression (Optional[float]): Not used. + It is here only for interface compatibility reson with "sox_io" backend. + format (str, optional): Override the audio format. + When ``filepath`` argument is path-like object, audio format is + inferred from file extension. If the file extension is missing or + different, you can specify the correct format with this argument. + + When ``filepath`` argument is file-like object, + this argument is required. + + Valid values are ``"wav"``, ``"ogg"``, ``"vorbis"``, + ``"flac"`` and ``"sph"``. + encoding (str, optional): Changes the encoding for supported formats. + This argument is effective only for supported formats, sush as + ``"wav"``, ``""flac"`` and ``"sph"``. Valid values are; + + - ``"PCM_S"`` (signed integer Linear PCM) + - ``"PCM_U"`` (unsigned integer Linear PCM) + - ``"PCM_F"`` (floating point PCM) + - ``"ULAW"`` (mu-law) + - ``"ALAW"`` (a-law) + + bits_per_sample (int, optional): Changes the bit depth for the + supported formats. + When ``format`` is one of ``"wav"``, ``"flac"`` or ``"sph"``, + you can change the bit depth. + Valid values are ``8``, ``16``, ``24``, ``32`` and ``64``. + + Supported formats/encodings/bit depth/compression are: + + ``"wav"`` + - 32-bit floating-point PCM + - 32-bit signed integer PCM + - 24-bit signed integer PCM + - 16-bit signed integer PCM + - 8-bit unsigned integer PCM + - 8-bit mu-law + - 8-bit a-law + + Note: Default encoding/bit depth is determined by the dtype of + the input Tensor. + + ``"flac"`` + - 8-bit + - 16-bit + - 24-bit (default) + + ``"ogg"``, ``"vorbis"`` + - Doesn't accept changing configuration. + + ``"sph"`` + - 8-bit signed integer PCM + - 16-bit signed integer PCM + - 24-bit signed integer PCM + - 32-bit signed integer PCM (default) + - 8-bit mu-law + - 8-bit a-law + - 16-bit a-law + - 24-bit a-law + - 32-bit a-law + + """ + if src.ndim != 2: + raise ValueError(f"Expected 2D Tensor, got {src.ndim}D.") + if compression is not None: + warnings.warn( + '`save` function of "soundfile" backend does not support "compression" parameter. ' + "The argument is silently ignored." 
+ ) + if hasattr(filepath, 'write'): + if format is None: + raise RuntimeError('`format` is required when saving to file object.') + ext = format.lower() + else: + ext = str(filepath).split(".")[-1].lower() + + if bits_per_sample not in (None, 8, 16, 24, 32, 64): + raise ValueError("Invalid bits_per_sample.") + subtype = _get_subtype(src.dtype, ext, encoding, bits_per_sample) + + # sph is an extension used in TED-LIUM but soundfile does not recognize it as NIST format, + # so we extend the extensions manually here + if ext in ["nis", "nist", "sph"] and format is None: + format = "NIST" + + if channels_first: + src = src.t() + + soundfile.write( + file=filepath, data=src, samplerate=sample_rate, subtype=subtype, format=format + )
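A minimal sketch of saving with this backend; the output file names are placeholders and ``soundfile`` must be installed.

import torch
from torchaudio.backend import soundfile_backend

waveform = torch.rand(1, 16000) * 2 - 1  # one second of mono noise in [-1.0, 1.0]

# WAV subtype defaults to the tensor dtype (32-bit float here).
soundfile_backend.save("out.wav", waveform, sample_rate=16000)

# Explicit encoding / bit depth, e.g. 16-bit signed integer PCM.
soundfile_backend.save(
    "out_int16.wav", waveform, sample_rate=16000,
    encoding="PCM_S", bits_per_sample=16)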
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/backend/sox_io_backend.html b/0.9.0/_modules/torchaudio/backend/sox_io_backend.html new file mode 100644 index 0000000000..7ea986d331 --- /dev/null +++ b/0.9.0/_modules/torchaudio/backend/sox_io_backend.html @@ -0,0 +1,940 @@
torchaudio.backend.sox_io_backend — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.backend.sox_io_backend

+import os
+from typing import Tuple, Optional
+
+import torch
+from torchaudio._internal import (
+    module_utils as _mod_utils,
+)
+
+import torchaudio
+from .common import AudioMetaData
+
+
+
[docs]@_mod_utils.requires_sox() +def info( + filepath: str, + format: Optional[str] = None, +) -> AudioMetaData: + """Get signal information of an audio file. + + Args: + filepath (path-like object or file-like object): + Source of audio data. When the function is not compiled by TorchScript, + (e.g. ``torch.jit.script``), the following types are accepted; + + * ``path-like``: file path + * ``file-like``: Object with ``read(size: int) -> bytes`` method, + which returns byte string of at most ``size`` length. + + When the function is compiled by TorchScript, only ``str`` type is allowed. + + Note: + + * When the input type is file-like object, this function cannot + get the correct length (``num_samples``) for certain formats, + such as ``mp3`` and ``vorbis``. + In this case, the value of ``num_samples`` is ``0``. + * This argument is intentionally annotated as ``str`` only due to + TorchScript compiler compatibility. + + format (str, optional): + Override the format detection with the given format. + Providing the argument might help when libsox can not infer the format + from header or extension, + + Returns: + AudioMetaData: Metadata of the given audio. + """ + if not torch.jit.is_scripting(): + if hasattr(filepath, 'read'): + sinfo = torchaudio._torchaudio.get_info_fileobj(filepath, format) + return AudioMetaData(*sinfo) + filepath = os.fspath(filepath) + sinfo = torch.ops.torchaudio.sox_io_get_info(filepath, format) + return AudioMetaData(*sinfo)
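A usage sketch of the function above; the file name is hypothetical and assumes torchaudio is built with libsox and mp3 support.

from torchaudio.backend import sox_io_backend

# Path-like input.
meta = sox_io_backend.info("interview.mp3")

# File-like input with an explicit format hint, useful when there is
# no file extension for libsox to inspect.
with open("interview.mp3", "rb") as fileobj:
    meta = sox_io_backend.info(fileobj, format="mp3")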
+ + +
[docs]@_mod_utils.requires_sox() +def load( + filepath: str, + frame_offset: int = 0, + num_frames: int = -1, + normalize: bool = True, + channels_first: bool = True, + format: Optional[str] = None, +) -> Tuple[torch.Tensor, int]: + """Load audio data from file. + + Note: + This function can handle all the codecs that underlying libsox can handle, + however it is tested on the following formats; + + * WAV, AMB + + * 32-bit floating-point + * 32-bit signed integer + * 24-bit signed integer + * 16-bit signed integer + * 8-bit unsigned integer (WAV only) + + * MP3 + * FLAC + * OGG/VORBIS + * OPUS + * SPHERE + * AMR-NB + + To load ``MP3``, ``FLAC``, ``OGG/VORBIS``, ``OPUS`` and other codecs ``libsox`` does not + handle natively, your installation of ``torchaudio`` has to be linked to ``libsox`` + and corresponding codec libraries such as ``libmad`` or ``libmp3lame`` etc. + + By default (``normalize=True``, ``channels_first=True``), this function returns Tensor with + ``float32`` dtype and the shape of ``[channel, time]``. + The samples are normalized to fit in the range of ``[-1.0, 1.0]``. + + When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit + signed integer, 24-bit signed integer, and 8-bit unsigned integer, by providing ``normalize=False``, + this function can return integer Tensor, where the samples are expressed within the whole range + of the corresponding dtype, that is, ``int32`` tensor for 32-bit signed PCM, + ``int16`` for 16-bit signed PCM and ``uint8`` for 8-bit unsigned PCM. Since torch does not + support ``int24`` dtype, 24-bit signed PCM are converted to ``int32`` tensors. + + ``normalize`` parameter has no effect on 32-bit floating-point WAV and other formats, such as + ``flac`` and ``mp3``. + For these formats, this function always returns ``float32`` Tensor with values normalized to + ``[-1.0, 1.0]``. + + Args: + filepath (path-like object or file-like object): + Source of audio data. When the function is not compiled by TorchScript, + (e.g. ``torch.jit.script``), the following types are accepted; + + * ``path-like``: file path + * ``file-like``: Object with ``read(size: int) -> bytes`` method, + which returns byte string of at most ``size`` length. + + When the function is compiled by TorchScript, only ``str`` type is allowed. + + Note: This argument is intentionally annotated as ``str`` only due to + TorchScript compiler compatibility. + frame_offset (int): + Number of frames to skip before start reading data. + num_frames (int): + Maximum number of frames to read. ``-1`` reads all the remaining samples, + starting from ``frame_offset``. + This function may return the less number of frames if there is not enough + frames in the given file. + normalize (bool): + When ``True``, this function always return ``float32``, and sample values are + normalized to ``[-1.0, 1.0]``. + If input file is integer WAV, giving ``False`` will change the resulting Tensor type to + integer type. + This argument has no effect for formats other than integer WAV type. + channels_first (bool): + When True, the returned Tensor has dimension ``[channel, time]``. + Otherwise, the returned Tensor's dimension is ``[time, channel]``. + format (str, optional): + Override the format detection with the given format. + Providing the argument might help when libsox can not infer the format + from header or extension, + + Returns: + Tuple[torch.Tensor, int]: Resulting Tensor and sample rate. 
+ If the input file has integer wav format and normalization is off, then it has + integer type, else ``float32`` type. If ``channels_first=True``, it has + ``[channel, time]`` else ``[time, channel]``. + """ + if not torch.jit.is_scripting(): + if hasattr(filepath, 'read'): + return torchaudio._torchaudio.load_audio_fileobj( + filepath, frame_offset, num_frames, normalize, channels_first, format) + filepath = os.fspath(filepath) + return torch.ops.torchaudio.sox_io_load_audio_file( + filepath, frame_offset, num_frames, normalize, channels_first, format)
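A minimal sketch of the slicing options described above, assuming a hypothetical FLAC file ``song.flac`` and a sox-enabled build:

from torchaudio.backend import sox_io_backend

# Load the whole file: float32, [channel, time], samples in [-1.0, 1.0].
waveform, sample_rate = sox_io_backend.load("song.flac")

# Load one second starting two seconds in; offsets are counted in frames (samples).
meta = sox_io_backend.info("song.flac")
segment, _ = sox_io_backend.load(
    "song.flac",
    frame_offset=2 * meta.sample_rate,
    num_frames=meta.sample_rate)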
+ + +
[docs]@_mod_utils.requires_sox() +def save( + filepath: str, + src: torch.Tensor, + sample_rate: int, + channels_first: bool = True, + compression: Optional[float] = None, + format: Optional[str] = None, + encoding: Optional[str] = None, + bits_per_sample: Optional[int] = None, +): + """Save audio data to file. + + Args: + filepath (str or pathlib.Path): Path to save file. + This function also handles ``pathlib.Path`` objects, but is annotated + as ``str`` for TorchScript compiler compatibility. + src (torch.Tensor): Audio data to save. must be 2D tensor. + sample_rate (int): sampling rate + channels_first (bool): If ``True``, the given tensor is interpreted as ``[channel, time]``, + otherwise ``[time, channel]``. + compression (Optional[float]): Used for formats other than WAV. + This corresponds to ``-C`` option of ``sox`` command. + + ``"mp3"`` + Either bitrate (in ``kbps``) with quality factor, such as ``128.2``, or + VBR encoding with quality factor such as ``-4.2``. Default: ``-4.5``. + + ``"flac"`` + Whole number from ``0`` to ``8``. ``8`` is default and highest compression. + + ``"ogg"``, ``"vorbis"`` + Number from ``-1`` to ``10``; ``-1`` is the highest compression + and lowest quality. Default: ``3``. + + See the detail at http://sox.sourceforge.net/soxformat.html. + format (str, optional): Override the audio format. + When ``filepath`` argument is path-like object, audio format is infered from + file extension. If file extension is missing or different, you can specify the + correct format with this argument. + + When ``filepath`` argument is file-like object, this argument is required. + + Valid values are ``"wav"``, ``"mp3"``, ``"ogg"``, ``"vorbis"``, ``"amr-nb"``, + ``"amb"``, ``"flac"``, ``"sph"``, ``"gsm"``, and ``"htk"``. + + encoding (str, optional): Changes the encoding for the supported formats. + This argument is effective only for supported formats, such as ``"wav"``, ``""amb"`` + and ``"sph"``. Valid values are; + + - ``"PCM_S"`` (signed integer Linear PCM) + - ``"PCM_U"`` (unsigned integer Linear PCM) + - ``"PCM_F"`` (floating point PCM) + - ``"ULAW"`` (mu-law) + - ``"ALAW"`` (a-law) + + Default values + If not provided, the default value is picked based on ``format`` and ``bits_per_sample``. + + ``"wav"``, ``"amb"`` + - | If both ``encoding`` and ``bits_per_sample`` are not provided, the ``dtype`` of the + | Tensor is used to determine the default value. + - ``"PCM_U"`` if dtype is ``uint8`` + - ``"PCM_S"`` if dtype is ``int16`` or ``int32` + - ``"PCM_F"`` if dtype is ``float32`` + + - ``"PCM_U"`` if ``bits_per_sample=8`` + - ``"PCM_S"`` otherwise + + ``"sph"`` format; + - the default value is ``"PCM_S"`` + + bits_per_sample (int, optional): Changes the bit depth for the supported formats. + When ``format`` is one of ``"wav"``, ``"flac"``, ``"sph"``, or ``"amb"``, you can change the + bit depth. Valid values are ``8``, ``16``, ``32`` and ``64``. + + Default Value; + If not provided, the default values are picked based on ``format`` and ``"encoding"``; + + ``"wav"``, ``"amb"``; + - | If both ``encoding`` and ``bits_per_sample`` are not provided, the ``dtype`` of the + | Tensor is used. 
+ - ``8`` if dtype is ``uint8`` + - ``16`` if dtype is ``int16`` + - ``32`` if dtype is ``int32`` or ``float32`` + + - ``8`` if ``encoding`` is ``"PCM_U"``, ``"ULAW"`` or ``"ALAW"`` + - ``16`` if ``encoding`` is ``"PCM_S"`` + - ``32`` if ``encoding`` is ``"PCM_F"`` + + ``"flac"`` format; + - the default value is ``24`` + + ``"sph"`` format; + - ``16`` if ``encoding`` is ``"PCM_U"``, ``"PCM_S"``, ``"PCM_F"`` or not provided. + - ``8`` if ``encoding`` is ``"ULAW"`` or ``"ALAW"`` + + ``"amb"`` format; + - ``8`` if ``encoding`` is ``"PCM_U"``, ``"ULAW"`` or ``"ALAW"`` + - ``16`` if ``encoding`` is ``"PCM_S"`` or not provided. + - ``32`` if ``encoding`` is ``"PCM_F"`` + + Supported formats/encodings/bit depth/compression are; + + ``"wav"``, ``"amb"`` + - 32-bit floating-point PCM + - 32-bit signed integer PCM + - 24-bit signed integer PCM + - 16-bit signed integer PCM + - 8-bit unsigned integer PCM + - 8-bit mu-law + - 8-bit a-law + + Note: Default encoding/bit depth is determined by the dtype of the input Tensor. + + ``"mp3"`` + Fixed bit rate (such as 128kHz) and variable bit rate compression. + Default: VBR with high quality. + + ``"flac"`` + - 8-bit + - 16-bit + - 24-bit (default) + + ``"ogg"``, ``"vorbis"`` + - Different quality level. Default: approx. 112kbps + + ``"sph"`` + - 8-bit signed integer PCM + - 16-bit signed integer PCM + - 24-bit signed integer PCM + - 32-bit signed integer PCM (default) + - 8-bit mu-law + - 8-bit a-law + - 16-bit a-law + - 24-bit a-law + - 32-bit a-law + + ``"amr-nb"`` + Bitrate ranging from 4.75 kbit/s to 12.2 kbit/s. Default: 4.75 kbit/s + + ``"gsm"`` + Lossy Speech Compression, CPU intensive. + + ``"htk"`` + Uses a default single-channel 16-bit PCM format. + + Note: + To save into formats that ``libsox`` does not handle natively, (such as ``"mp3"``, + ``"flac"``, ``"ogg"`` and ``"vorbis"``), your installation of ``torchaudio`` has + to be linked to ``libsox`` and corresponding codec libraries such as ``libmad`` + or ``libmp3lame`` etc. + """ + if not torch.jit.is_scripting(): + if hasattr(filepath, 'write'): + torchaudio._torchaudio.save_audio_fileobj( + filepath, src, sample_rate, channels_first, compression, + format, encoding, bits_per_sample) + return + filepath = os.fspath(filepath) + torch.ops.torchaudio.sox_io_save_audio_file( + filepath, src, sample_rate, channels_first, compression, format, encoding, bits_per_sample)
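A minimal sketch of saving with different formats and compression values; the paths are placeholders, and mp3 output additionally requires torchaudio to be linked against libmp3lame.

import torch
from torchaudio.backend import sox_io_backend

waveform = torch.rand(2, 44100) * 2 - 1  # one second of stereo noise

# WAV: encoding / bit depth follow the tensor dtype (32-bit float here).
sox_io_backend.save("out.wav", waveform, 44100)

# FLAC with the highest compression level (compression maps to sox -C).
sox_io_backend.save("out.flac", waveform, 44100, compression=8)

# MP3 with the default VBR quality factor.
sox_io_backend.save("out.mp3", waveform, 44100, compression=-4.5)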
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/backend/utils.html b/0.9.0/_modules/torchaudio/backend/utils.html new file mode 100644 index 0000000000..31b75299ba --- /dev/null +++ b/0.9.0/_modules/torchaudio/backend/utils.html @@ -0,0 +1,706 @@
torchaudio.backend.utils — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.backend.utils

+"""Defines utilities for switching audio backends"""
+import warnings
+from typing import Optional, List
+
+import torchaudio
+from torchaudio._internal import module_utils as _mod_utils
+from . import (
+    no_backend,
+    sox_io_backend,
+    soundfile_backend,
+)
+
+__all__ = [
+    'list_audio_backends',
+    'get_audio_backend',
+    'set_audio_backend',
+]
+
+
+
[docs]def list_audio_backends() -> List[str]: + """List available backends + + Returns: + List[str]: The list of available backends. + """ + backends = [] + if _mod_utils.is_module_available('soundfile'): + backends.append('soundfile') + if _mod_utils.is_sox_available(): + backends.append('sox_io') + return backends
+ + +
[docs]def set_audio_backend(backend: Optional[str]): + """Set the backend for I/O operation + + Args: + backend (Optional[str]): Name of the backend. + One of ``"sox_io"`` or ``"soundfile"`` based on availability + of the system. If ``None`` is provided the current backend is unassigned. + """ + if backend is not None and backend not in list_audio_backends(): + raise RuntimeError( + f'Backend "{backend}" is not one of ' + f'available backends: {list_audio_backends()}.') + + if backend is None: + module = no_backend + elif backend == 'sox_io': + module = sox_io_backend + elif backend == 'soundfile': + module = soundfile_backend + else: + raise NotImplementedError(f'Unexpected backend "{backend}"') + + for func in ['save', 'load', 'info']: + setattr(torchaudio, func, getattr(module, func))
+ + +def _init_audio_backend(): + backends = list_audio_backends() + if 'sox_io' in backends: + set_audio_backend('sox_io') + elif 'soundfile' in backends: + set_audio_backend('soundfile') + else: + warnings.warn('No audio backend is available.') + set_audio_backend(None) + + +
[docs]def get_audio_backend() -> Optional[str]: + """Get the name of the current backend + + Returns: + Optional[str]: The name of the current backend or ``None`` if no backend is assigned. + """ + if torchaudio.load == no_backend.load: + return None + if torchaudio.load == sox_io_backend.load: + return 'sox_io' + if torchaudio.load == soundfile_backend.load: + return 'soundfile' + raise ValueError('Unknown backend.')
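A short sketch of switching backends with these functions; which backends are listed depends on the local installation.

import torchaudio

print(torchaudio.list_audio_backends())  # e.g. ['soundfile', 'sox_io']

# Rebind torchaudio.load / save / info to the soundfile backend
# (only valid if 'soundfile' appears in the list above).
torchaudio.set_audio_backend("soundfile")
assert torchaudio.get_audio_backend() == "soundfile"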
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/compliance/kaldi.html b/0.9.0/_modules/torchaudio/compliance/kaldi.html new file mode 100644 index 0000000000..98c5e39e58 --- /dev/null +++ b/0.9.0/_modules/torchaudio/compliance/kaldi.html @@ -0,0 +1,1405 @@
torchaudio.compliance.kaldi — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.compliance.kaldi

+from typing import Tuple
+
+import math
+import torch
+from torch import Tensor
+
+import torchaudio
+import torchaudio._internal.fft
+from torchaudio._internal.module_utils import deprecated
+
+__all__ = [
+    'get_mel_banks',
+    'inverse_mel_scale',
+    'inverse_mel_scale_scalar',
+    'mel_scale',
+    'mel_scale_scalar',
+    'spectrogram',
+    'fbank',
+    'mfcc',
+    'vtln_warp_freq',
+    'vtln_warp_mel_freq',
+    'resample_waveform',
+]
+
+# numeric_limits<float>::epsilon() 1.1920928955078125e-07
+EPSILON = torch.tensor(torch.finfo(torch.float).eps)
+# 1 milliseconds = 0.001 seconds
+MILLISECONDS_TO_SECONDS = 0.001
+
+# window types
+HAMMING = 'hamming'
+HANNING = 'hanning'
+POVEY = 'povey'
+RECTANGULAR = 'rectangular'
+BLACKMAN = 'blackman'
+WINDOWS = [HAMMING, HANNING, POVEY, RECTANGULAR, BLACKMAN]
+
+
+def _get_epsilon(device, dtype):
+    return EPSILON.to(device=device, dtype=dtype)
+
+
+def _next_power_of_2(x: int) -> int:
+    r"""Returns the smallest power of 2 that is greater than x
+    """
+    return 1 if x == 0 else 2 ** (x - 1).bit_length()
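For illustration, the rounding behaviour of this helper (as if executed inside this module):

assert _next_power_of_2(0) == 1
assert _next_power_of_2(400) == 512  # e.g. a 25 ms window at 16 kHz
assert _next_power_of_2(512) == 512  # powers of two are returned unchanged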
+
+
+def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
+    r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``)
+    representing how the window is shifted along the waveform. Each row is a frame.
+
+    Args:
+        waveform (Tensor): Tensor of size ``num_samples``
+        window_size (int): Frame length
+        window_shift (int): Frame shift
+        snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit
+            in the file, and the number of frames depends on the frame_length.  If False, the number of frames
+            depends only on the frame_shift, and we reflect the data at the ends.
+
+    Returns:
+        Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame
+    """
+    assert waveform.dim() == 1
+    num_samples = waveform.size(0)
+    strides = (window_shift * waveform.stride(0), waveform.stride(0))
+
+    if snip_edges:
+        if num_samples < window_size:
+            return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device)
+        else:
+            m = 1 + (num_samples - window_size) // window_shift
+    else:
+        reversed_waveform = torch.flip(waveform, [0])
+        m = (num_samples + (window_shift // 2)) // window_shift
+        pad = window_size // 2 - window_shift // 2
+        pad_right = reversed_waveform
+        if pad > 0:
+            # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
+            # but we want [2, 1, 0, 0, 1, 2]
+            pad_left = reversed_waveform[-pad:]
+            waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
+        else:
+            # pad is negative so we want to trim the waveform at the front
+            waveform = torch.cat((waveform[-pad:], pad_right), dim=0)
+
+    sizes = (m, window_size)
+    return waveform.as_strided(sizes, strides)
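A small illustration of the resulting frame counts, as if executed inside this module with a toy 10-sample waveform:

import torch

waveform = torch.arange(10, dtype=torch.float32)

# snip_edges=True keeps only fully contained frames: m = 1 + (10 - 4) // 2 = 4.
frames = _get_strided(waveform, window_size=4, window_shift=2, snip_edges=True)
assert frames.shape == (4, 4)

# snip_edges=False reflects the signal at the ends: m = (10 + 2 // 2) // 2 = 5.
frames = _get_strided(waveform, window_size=4, window_shift=2, snip_edges=False)
assert frames.shape == (5, 4)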
+
+
+def _feature_window_function(window_type: str,
+                             window_size: int,
+                             blackman_coeff: float,
+                             device: torch.device,
+                             dtype: int,
+                             ) -> Tensor:
+    r"""Returns a window function with the given type and size
+    """
+    if window_type == HANNING:
+        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
+    elif window_type == HAMMING:
+        return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
+    elif window_type == POVEY:
+        # like hanning but goes to zero at edges
+        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
+    elif window_type == RECTANGULAR:
+        return torch.ones(window_size, device=device, dtype=dtype)
+    elif window_type == BLACKMAN:
+        a = 2 * math.pi / (window_size - 1)
+        window_function = torch.arange(window_size, device=device, dtype=dtype)
+        # can't use torch.blackman_window as they use different coefficients
+        return (blackman_coeff - 0.5 * torch.cos(a * window_function) +
+                (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, dtype=dtype)
+    else:
+        raise Exception('Invalid window type ' + window_type)
+
+
+def _get_log_energy(strided_input: Tensor,
+                    epsilon: Tensor,
+                    energy_floor: float) -> Tensor:
+    r"""Returns the log energy of size (m) for a strided_input (m,*)
+    """
+    device, dtype = strided_input.device, strided_input.dtype
+    log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log()  # size (m)
+    if energy_floor == 0.0:
+        return log_energy
+    return torch.max(
+        log_energy, torch.tensor(math.log(energy_floor), device=device, dtype=dtype))
+
+
+def _get_waveform_and_window_properties(waveform: Tensor,
+                                        channel: int,
+                                        sample_frequency: float,
+                                        frame_shift: float,
+                                        frame_length: float,
+                                        round_to_power_of_two: bool,
+                                        preemphasis_coefficient: float) -> Tuple[Tensor, int, int, int]:
+    r"""Gets the waveform and window properties
+    """
+    channel = max(channel, 0)
+    assert channel < waveform.size(0), ('Invalid channel {} for size {}'.format(channel, waveform.size(0)))
+    waveform = waveform[channel, :]  # size (n)
+    window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS)
+    window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS)
+    padded_window_size = _next_power_of_2(window_size) if round_to_power_of_two else window_size
+
+    assert 2 <= window_size <= len(
+        waveform), ('choose a window size {} that is [2, {}]'
+                    .format(window_size, len(waveform)))
+    assert 0 < window_shift, '`window_shift` must be greater than 0'
+    assert padded_window_size % 2 == 0, 'the padded `window_size` must be divisible by two.' \
+                                        ' use `round_to_power_of_two` or change `frame_length`'
+    assert 0. <= preemphasis_coefficient <= 1.0, '`preemphasis_coefficient` must be between [0,1]'
+    assert sample_frequency > 0, '`sample_frequency` must be greater than zero'
+    return waveform, window_shift, window_size, padded_window_size
+
+
+def _get_window(waveform: Tensor,
+                padded_window_size: int,
+                window_size: int,
+                window_shift: int,
+                window_type: str,
+                blackman_coeff: float,
+                snip_edges: bool,
+                raw_energy: bool,
+                energy_floor: float,
+                dither: float,
+                remove_dc_offset: bool,
+                preemphasis_coefficient: float) -> Tuple[Tensor, Tensor]:
+    r"""Gets a window and its log energy
+
+    Returns:
+        (Tensor, Tensor): strided_input of size (m, ``padded_window_size``) and signal_log_energy of size (m)
+    """
+    device, dtype = waveform.device, waveform.dtype
+    epsilon = _get_epsilon(device, dtype)
+
+    # size (m, window_size)
+    strided_input = _get_strided(waveform, window_size, window_shift, snip_edges)
+
+    if dither != 0.0:
+        # Returns a random number strictly between 0 and 1
+        x = torch.max(epsilon, torch.rand(strided_input.shape, device=device, dtype=dtype))
+        rand_gauss = torch.sqrt(-2 * x.log()) * torch.cos(2 * math.pi * x)
+        strided_input = strided_input + rand_gauss * dither
+
+    if remove_dc_offset:
+        # Subtract each row/frame by its mean
+        row_means = torch.mean(strided_input, dim=1).unsqueeze(1)  # size (m, 1)
+        strided_input = strided_input - row_means
+
+    if raw_energy:
+        # Compute the log energy of each row/frame before applying preemphasis and
+        # window function
+        signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor)  # size (m)
+
+    if preemphasis_coefficient != 0.0:
+        # strided_input[i,j] -= preemphasis_coefficient * strided_input[i, max(0, j-1)] for all i,j
+        offset_strided_input = torch.nn.functional.pad(
+            strided_input.unsqueeze(0), (1, 0), mode='replicate').squeeze(0)  # size (m, window_size + 1)
+        strided_input = strided_input - preemphasis_coefficient * offset_strided_input[:, :-1]
+
+    # Apply window_function to each row/frame
+    window_function = _feature_window_function(
+        window_type, window_size, blackman_coeff, device, dtype).unsqueeze(0)  # size (1, window_size)
+    strided_input = strided_input * window_function  # size (m, window_size)
+
+    # Pad columns with zero until we reach size (m, padded_window_size)
+    if padded_window_size != window_size:
+        padding_right = padded_window_size - window_size
+        strided_input = torch.nn.functional.pad(
+            strided_input.unsqueeze(0), (0, padding_right), mode='constant', value=0).squeeze(0)
+
+    # Compute energy after window function (not the raw one)
+    if not raw_energy:
+        signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor)  # size (m)
+
+    return strided_input, signal_log_energy
+
+
+def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor:
+    # subtracts the column mean of the tensor size (m, n) if subtract_mean=True
+    # it returns size (m, n)
+    if subtract_mean:
+        col_means = torch.mean(tensor, dim=0).unsqueeze(0)
+        tensor = tensor - col_means
+    return tensor
+
+
+
[docs]def spectrogram(waveform: Tensor, + blackman_coeff: float = 0.42, + channel: int = -1, + dither: float = 0.0, + energy_floor: float = 1.0, + frame_length: float = 25.0, + frame_shift: float = 10.0, + min_duration: float = 0.0, + preemphasis_coefficient: float = 0.97, + raw_energy: bool = True, + remove_dc_offset: bool = True, + round_to_power_of_two: bool = True, + sample_frequency: float = 16000.0, + snip_edges: bool = True, + subtract_mean: bool = False, + window_type: str = POVEY) -> Tensor: + r"""Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi's + compute-spectrogram-feats. + + Args: + waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2) + blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``) + channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``) + dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set + the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``) + energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution: + this floor is applied to the zeroth component, representing the total signal energy. The floor on the + individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``) + frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``) + frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``) + min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``) + preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``) + raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``) + remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``) + round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input + to FFT. (Default: ``True``) + sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if + specified there) (Default: ``16000.0``) + snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit + in the file, and the number of frames depends on the frame_length. If False, the number of frames + depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``) + subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do + it this way. (Default: ``False``) + window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman') + (Default: ``'povey'``) + + Returns: + Tensor: A spectrogram identical to what Kaldi would output. 
The shape is + (m, ``padded_window_size // 2 + 1``) where m is calculated in _get_strided + """ + device, dtype = waveform.device, waveform.dtype + epsilon = _get_epsilon(device, dtype) + + waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties( + waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient) + + if len(waveform) < min_duration * sample_frequency: + # signal is too short + return torch.empty(0) + + strided_input, signal_log_energy = _get_window( + waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff, + snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient) + + # size (m, padded_window_size // 2 + 1, 2) + fft = torchaudio._internal.fft.rfft(strided_input) + + # Convert the FFT into a power spectrum + power_spectrum = torch.max(fft.abs().pow(2.), epsilon).log() # size (m, padded_window_size // 2 + 1) + power_spectrum[:, 0] = signal_log_energy + + power_spectrum = _subtract_column_mean(power_spectrum, subtract_mean) + return power_spectrum
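A minimal sketch of calling this function on one second of synthetic 16 kHz mono audio; the shape comment follows from the default 25 ms / 10 ms framing.

import torch
import torchaudio.compliance.kaldi as kaldi

waveform = torch.rand(1, 16000) * 2 - 1  # one second of mono audio at 16 kHz

spec = kaldi.spectrogram(waveform, sample_frequency=16000.0)
# 25 ms window -> 400 samples, zero-padded to 512; 10 ms shift -> 160 samples.
# m = 1 + (16000 - 400) // 160 = 98 frames, 512 // 2 + 1 = 257 frequency bins.
print(spec.shape)  # torch.Size([98, 257])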
+ + +def inverse_mel_scale_scalar(mel_freq: float) -> float: + return 700.0 * (math.exp(mel_freq / 1127.0) - 1.0) + + +def inverse_mel_scale(mel_freq: Tensor) -> Tensor: + return 700.0 * ((mel_freq / 1127.0).exp() - 1.0) + + +def mel_scale_scalar(freq: float) -> float: + return 1127.0 * math.log(1.0 + freq / 700.0) + + +def mel_scale(freq: Tensor) -> Tensor: + return 1127.0 * (1.0 + freq / 700.0).log() + + +def vtln_warp_freq(vtln_low_cutoff: float, + vtln_high_cutoff: float, + low_freq: float, + high_freq: float, + vtln_warp_factor: float, + freq: Tensor) -> Tensor: + r"""This computes a VTLN warping function that is not the same as HTK's one, + but has similar inputs (this function has the advantage of never producing + empty bins). + + This function computes a warp function F(freq), defined between low_freq + and high_freq inclusive, with the following properties: + F(low_freq) == low_freq + F(high_freq) == high_freq + The function is continuous and piecewise linear with two inflection + points. + The lower inflection point (measured in terms of the unwarped + frequency) is at frequency l, determined as described below. + The higher inflection point is at a frequency h, determined as + described below. + If l <= f <= h, then F(f) = f/vtln_warp_factor. + If the higher inflection point (measured in terms of the unwarped + frequency) is at h, then max(h, F(h)) == vtln_high_cutoff. + Since (by the last point) F(h) == h/vtln_warp_factor, then + max(h, h/vtln_warp_factor) == vtln_high_cutoff, so + h = vtln_high_cutoff / max(1, 1/vtln_warp_factor). + = vtln_high_cutoff * min(1, vtln_warp_factor). + If the lower inflection point (measured in terms of the unwarped + frequency) is at l, then min(l, F(l)) == vtln_low_cutoff + This implies that l = vtln_low_cutoff / min(1, 1/vtln_warp_factor) + = vtln_low_cutoff * max(1, vtln_warp_factor) + Args: + vtln_low_cutoff (float): Lower frequency cutoffs for VTLN + vtln_high_cutoff (float): Upper frequency cutoffs for VTLN + low_freq (float): Lower frequency cutoffs in mel computation + high_freq (float): Upper frequency cutoffs in mel computation + vtln_warp_factor (float): Vtln warp factor + freq (Tensor): given frequency in Hz + + Returns: + Tensor: Freq after vtln warp + """ + assert vtln_low_cutoff > low_freq, 'be sure to set the vtln_low option higher than low_freq' + assert vtln_high_cutoff < high_freq, 'be sure to set the vtln_high option lower than high_freq [or negative]' + l = vtln_low_cutoff * max(1.0, vtln_warp_factor) + h = vtln_high_cutoff * min(1.0, vtln_warp_factor) + scale = 1.0 / vtln_warp_factor + Fl = scale * l # F(l) + Fh = scale * h # F(h) + assert l > low_freq and h < high_freq + # slope of left part of the 3-piece linear function + scale_left = (Fl - low_freq) / (l - low_freq) + # [slope of center part is just "scale"] + + # slope of right part of the 3-piece linear function + scale_right = (high_freq - Fh) / (high_freq - h) + + res = torch.empty_like(freq) + + outside_low_high_freq = torch.lt(freq, low_freq) | torch.gt(freq, high_freq) # freq < low_freq || freq > high_freq + before_l = torch.lt(freq, l) # freq < l + before_h = torch.lt(freq, h) # freq < h + after_h = torch.ge(freq, h) # freq >= h + + # order of operations matter here (since there is overlapping frequency regions) + res[after_h] = high_freq + scale_right * (freq[after_h] - high_freq) + res[before_h] = scale * freq[before_h] + res[before_l] = low_freq + scale_left * (freq[before_l] - low_freq) + res[outside_low_high_freq] = freq[outside_low_high_freq] + + return res 
+ + +def vtln_warp_mel_freq(vtln_low_cutoff: float, + vtln_high_cutoff: float, + low_freq, high_freq: float, + vtln_warp_factor: float, + mel_freq: Tensor) -> Tensor: + r""" + Args: + vtln_low_cutoff (float): Lower frequency cutoffs for VTLN + vtln_high_cutoff (float): Upper frequency cutoffs for VTLN + low_freq (float): Lower frequency cutoffs in mel computation + high_freq (float): Upper frequency cutoffs in mel computation + vtln_warp_factor (float): Vtln warp factor + mel_freq (Tensor): Given frequency in Mel + + Returns: + Tensor: ``mel_freq`` after vtln warp + """ + return mel_scale(vtln_warp_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq, + vtln_warp_factor, inverse_mel_scale(mel_freq))) + + +def get_mel_banks(num_bins: int, + window_length_padded: int, + sample_freq: float, + low_freq: float, + high_freq: float, + vtln_low: float, + vtln_high: float, + vtln_warp_factor: float) -> Tuple[Tensor, Tensor]: + """ + Returns: + (Tensor, Tensor): The tuple consists of ``bins`` (which is + melbank of size (``num_bins``, ``num_fft_bins``)) and ``center_freqs`` (which is + center frequencies of bins of size (``num_bins``)). + """ + assert num_bins > 3, 'Must have at least 3 mel bins' + assert window_length_padded % 2 == 0 + num_fft_bins = window_length_padded / 2 + nyquist = 0.5 * sample_freq + + if high_freq <= 0.0: + high_freq += nyquist + + assert (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq), \ + ('Bad values in options: low-freq {} and high-freq {} vs. nyquist {}'.format(low_freq, high_freq, nyquist)) + + # fft-bin width [think of it as Nyquist-freq / half-window-length] + fft_bin_width = sample_freq / window_length_padded + mel_low_freq = mel_scale_scalar(low_freq) + mel_high_freq = mel_scale_scalar(high_freq) + + # divide by num_bins+1 in next line because of end-effects where the bins + # spread out to the sides. 
+ mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1) + + if vtln_high < 0.0: + vtln_high += nyquist + + assert vtln_warp_factor == 1.0 or ((low_freq < vtln_low < high_freq) and + (0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)), \ + ('Bad values in options: vtln-low {} and vtln-high {}, versus ' + 'low-freq {} and high-freq {}'.format(vtln_low, vtln_high, low_freq, high_freq)) + + bin = torch.arange(num_bins).unsqueeze(1) + left_mel = mel_low_freq + bin * mel_freq_delta # size(num_bins, 1) + center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # size(num_bins, 1) + right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # size(num_bins, 1) + + if vtln_warp_factor != 1.0: + left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel) + center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel) + right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel) + + center_freqs = inverse_mel_scale(center_mel) # size (num_bins) + # size(1, num_fft_bins) + mel = mel_scale(fft_bin_width * torch.arange(num_fft_bins)).unsqueeze(0) + + # size (num_bins, num_fft_bins) + up_slope = (mel - left_mel) / (center_mel - left_mel) + down_slope = (right_mel - mel) / (right_mel - center_mel) + + if vtln_warp_factor == 1.0: + # left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values + bins = torch.max(torch.zeros(1), torch.min(up_slope, down_slope)) + else: + # warping can move the order of left_mel, center_mel, right_mel anywhere + bins = torch.zeros_like(up_slope) + up_idx = torch.gt(mel, left_mel) & torch.le(mel, center_mel) # left_mel < mel <= center_mel + down_idx = torch.gt(mel, center_mel) & torch.lt(mel, right_mel) # center_mel < mel < right_mel + bins[up_idx] = up_slope[up_idx] + bins[down_idx] = down_slope[down_idx] + + return bins, center_freqs + + +
[docs]def fbank(waveform: Tensor, + blackman_coeff: float = 0.42, + channel: int = -1, + dither: float = 0.0, + energy_floor: float = 1.0, + frame_length: float = 25.0, + frame_shift: float = 10.0, + high_freq: float = 0.0, + htk_compat: bool = False, + low_freq: float = 20.0, + min_duration: float = 0.0, + num_mel_bins: int = 23, + preemphasis_coefficient: float = 0.97, + raw_energy: bool = True, + remove_dc_offset: bool = True, + round_to_power_of_two: bool = True, + sample_frequency: float = 16000.0, + snip_edges: bool = True, + subtract_mean: bool = False, + use_energy: bool = False, + use_log_fbank: bool = True, + use_power: bool = True, + vtln_high: float = -500.0, + vtln_low: float = 100.0, + vtln_warp: float = 1.0, + window_type: str = POVEY) -> Tensor: + r"""Create a fbank from a raw audio signal. This matches the input/output of Kaldi's + compute-fbank-feats. + + Args: + waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2) + blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``) + channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``) + dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set + the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``) + energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution: + this floor is applied to the zeroth component, representing the total signal energy. The floor on the + individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``) + frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``) + frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``) + high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist) + (Default: ``0.0``) + htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features + (need to change other parameters). (Default: ``False``) + low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``) + min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``) + num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``) + preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``) + raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``) + remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``) + round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input + to FFT. (Default: ``True``) + sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if + specified there) (Default: ``16000.0``) + snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit + in the file, and the number of frames depends on the frame_length. If False, the number of frames + depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``) + subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do + it this way. (Default: ``False``) + use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. 
+            (Default: ``False``)
+        use_log_fbank (bool, optional): If true, produce log-filterbank, else produce linear. (Default: ``True``)
+        use_power (bool, optional): If true, use power, else use magnitude. (Default: ``True``)
+        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
+            negative, offset from high-mel-freq) (Default: ``-500.0``)
+        vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
+        vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
+        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
+            (Default: ``'povey'``)
+
+    Returns:
+        Tensor: A fbank identical to what Kaldi would output. The shape is (m, ``num_mel_bins + use_energy``)
+        where m is calculated in _get_strided
+    """
+    device, dtype = waveform.device, waveform.dtype
+
+    waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
+        waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient)
+
+    if len(waveform) < min_duration * sample_frequency:
+        # signal is too short
+        return torch.empty(0, device=device, dtype=dtype)
+
+    # strided_input, size (m, padded_window_size) and signal_log_energy, size (m)
+    strided_input, signal_log_energy = _get_window(
+        waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff,
+        snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient)
+
+    # size (m, padded_window_size // 2 + 1)
+    spectrum = torchaudio._internal.fft.rfft(strided_input).abs()
+    if use_power:
+        spectrum = spectrum.pow(2.)
+
+    # size (num_mel_bins, padded_window_size // 2)
+    mel_energies, _ = get_mel_banks(num_mel_bins, padded_window_size, sample_frequency,
+                                    low_freq, high_freq, vtln_low, vtln_high, vtln_warp)
+    mel_energies = mel_energies.to(device=device, dtype=dtype)
+
+    # pad right column with zeros and add dimension, size (num_mel_bins, padded_window_size // 2 + 1)
+    mel_energies = torch.nn.functional.pad(mel_energies, (0, 1), mode='constant', value=0)
+
+    # sum with mel filterbanks over the power spectrum, size (m, num_mel_bins)
+    mel_energies = torch.mm(spectrum, mel_energies.T)
+    if use_log_fbank:
+        # avoid log of zero (which should be prevented anyway by dithering)
+        mel_energies = torch.max(mel_energies, _get_epsilon(device, dtype)).log()
+
+    # if use_energy then add it as the last column for htk_compat == true else first column
+    if use_energy:
+        signal_log_energy = signal_log_energy.unsqueeze(1)  # size (m, 1)
+        # returns size (m, num_mel_bins + 1)
+        if htk_compat:
+            mel_energies = torch.cat((mel_energies, signal_log_energy), dim=1)
+        else:
+            mel_energies = torch.cat((signal_log_energy, mel_energies), dim=1)
+
+    mel_energies = _subtract_column_mean(mel_energies, subtract_mean)
+    return mel_energies
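# --- Illustrative sketch (not part of the original torchaudio source) ---
# A hedged usage example for ``fbank``; "speech.wav" is a hypothetical mono file
# and the keyword values simply mirror Kaldi's compute-fbank-feats defaults.
def _example_fbank_usage():
    import torchaudio
    waveform, sample_rate = torchaudio.load("speech.wav")   # hypothetical input file
    feats = fbank(waveform, num_mel_bins=23, sample_frequency=float(sample_rate))
    # feats: shape (m, 23), one row of log mel filter bank energies per frame
    return feats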
+ + +def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor: + # returns a dct matrix of size (num_mel_bins, num_ceps) + # size (num_mel_bins, num_mel_bins) + dct_matrix = torchaudio.functional.create_dct(num_mel_bins, num_mel_bins, 'ortho') + # kaldi expects the first cepstral to be weighted sum of factor sqrt(1/num_mel_bins) + # this would be the first column in the dct_matrix for torchaudio as it expects a + # right multiply (which would be the first column of the kaldi's dct_matrix as kaldi + # expects a left multiply e.g. dct_matrix * vector). + dct_matrix[:, 0] = math.sqrt(1 / float(num_mel_bins)) + dct_matrix = dct_matrix[:, :num_ceps] + return dct_matrix + + +def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor: + # returns size (num_ceps) + # Compute liftering coefficients (scaling on cepstral coeffs) + # coeffs are numbered slightly differently from HTK: the zeroth index is C0, which is not affected. + i = torch.arange(num_ceps) + return 1.0 + 0.5 * cepstral_lifter * torch.sin(math.pi * i / cepstral_lifter) + + +
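# --- Illustrative sketch (not part of the original torchaudio source) ---
# A small check of the two helpers above: the DCT matrix maps num_mel_bins log mel
# energies to num_ceps cepstra via a right multiply, and the lifter scales cepstrum
# index i by 1 + 0.5 * Q * sin(pi * i / Q), where Q is ``cepstral_lifter``.
def _example_dct_and_lifter():
    dct_matrix = _get_dct_matrix(num_ceps=13, num_mel_bins=23)             # shape (23, 13)
    lifter_coeffs = _get_lifter_coeffs(num_ceps=13, cepstral_lifter=22.0)  # shape (13,)
    # lifter_coeffs[0] == 1.0 because sin(0) == 0, so C0 is left unscaled
    return dct_matrix, lifter_coeffs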
[docs]def mfcc( + waveform: Tensor, + blackman_coeff: float = 0.42, + cepstral_lifter: float = 22.0, + channel: int = -1, + dither: float = 0.0, + energy_floor: float = 1.0, + frame_length: float = 25.0, + frame_shift: float = 10.0, + high_freq: float = 0.0, + htk_compat: bool = False, + low_freq: float = 20.0, + num_ceps: int = 13, + min_duration: float = 0.0, + num_mel_bins: int = 23, + preemphasis_coefficient: float = 0.97, + raw_energy: bool = True, + remove_dc_offset: bool = True, + round_to_power_of_two: bool = True, + sample_frequency: float = 16000.0, + snip_edges: bool = True, + subtract_mean: bool = False, + use_energy: bool = False, + vtln_high: float = -500.0, + vtln_low: float = 100.0, + vtln_warp: float = 1.0, + window_type: str = POVEY) -> Tensor: + r"""Create a mfcc from a raw audio signal. This matches the input/output of Kaldi's + compute-mfcc-feats. + + Args: + waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2) + blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``) + cepstral_lifter (float, optional): Constant that controls scaling of MFCCs (Default: ``22.0``) + channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``) + dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set + the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``) + energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution: + this floor is applied to the zeroth component, representing the total signal energy. The floor on the + individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``) + frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``) + frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``) + high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist) + (Default: ``0.0``) + htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible + features (need to change other parameters). (Default: ``False``) + low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``) + num_ceps (int, optional): Number of cepstra in MFCC computation (including C0) (Default: ``13``) + min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``) + num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``) + preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``) + raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``) + remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``) + round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input + to FFT. (Default: ``True``) + sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if + specified there) (Default: ``16000.0``) + snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit + in the file, and the number of frames depends on the frame_length. If False, the number of frames + depends only on the frame_shift, and we reflect the data at the ends. 
+            (Default: ``True``)
+        subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
+            it this way. (Default: ``False``)
+        use_energy (bool, optional): If true, the signal log energy replaces the first cepstral
+            coefficient (it is placed last when ``htk_compat`` is true). (Default: ``False``)
+        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
+            negative, offset from high-mel-freq) (Default: ``-500.0``)
+        vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
+        vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
+        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
+            (Default: ``"povey"``)
+
+    Returns:
+        Tensor: A mfcc identical to what Kaldi would output. The shape is (m, ``num_ceps``)
+        where m is calculated in _get_strided
+    """
+    assert num_ceps <= num_mel_bins, 'num_ceps cannot be larger than num_mel_bins: %d vs %d' % (num_ceps, num_mel_bins)
+
+    device, dtype = waveform.device, waveform.dtype
+
+    # The mel_energies should be squared (use_power=True), should not have the mean subtracted
+    # (subtract_mean=False), and should use log (use_log_fbank=True).
+    # size (m, num_mel_bins + use_energy)
+    feature = fbank(waveform=waveform, blackman_coeff=blackman_coeff, channel=channel,
+                    dither=dither, energy_floor=energy_floor, frame_length=frame_length,
+                    frame_shift=frame_shift, high_freq=high_freq, htk_compat=htk_compat,
+                    low_freq=low_freq, min_duration=min_duration, num_mel_bins=num_mel_bins,
+                    preemphasis_coefficient=preemphasis_coefficient, raw_energy=raw_energy,
+                    remove_dc_offset=remove_dc_offset, round_to_power_of_two=round_to_power_of_two,
+                    sample_frequency=sample_frequency, snip_edges=snip_edges, subtract_mean=False,
+                    use_energy=use_energy, use_log_fbank=True, use_power=True,
+                    vtln_high=vtln_high, vtln_low=vtln_low, vtln_warp=vtln_warp, window_type=window_type)
+
+    if use_energy:
+        # size (m)
+        signal_log_energy = feature[:, num_mel_bins if htk_compat else 0]
+        # offset is 0 if htk_compat==True else 1
+        mel_offset = int(not htk_compat)
+        feature = feature[:, mel_offset:(num_mel_bins + mel_offset)]
+
+    # size (num_mel_bins, num_ceps)
+    dct_matrix = _get_dct_matrix(num_ceps, num_mel_bins).to(dtype=dtype, device=device)
+
+    # size (m, num_ceps)
+    feature = feature.matmul(dct_matrix)
+
+    if cepstral_lifter != 0.0:
+        # size (1, num_ceps)
+        lifter_coeffs = _get_lifter_coeffs(num_ceps, cepstral_lifter).unsqueeze(0)
+        feature *= lifter_coeffs.to(device=device, dtype=dtype)
+
+    # if use_energy then replace the last column for htk_compat == true else first column
+    if use_energy:
+        feature[:, 0] = signal_log_energy
+
+    if htk_compat:
+        energy = feature[:, 0].unsqueeze(1)  # size (m, 1)
+        feature = feature[:, 1:]  # size (m, num_ceps - 1)
+        if not use_energy:
+            # scale on C0 (actually removing a scale we previously added that's
+            # part of one common definition of the cosine transform.)
+            energy *= math.sqrt(2)
+
+        feature = torch.cat((feature, energy), dim=1)
+
+    feature = _subtract_column_mean(feature, subtract_mean)
+    return feature
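# --- Illustrative sketch (not part of the original torchaudio source) ---
# A hedged usage example for ``mfcc``; "speech.wav" is a hypothetical mono file and
# the keyword values mirror Kaldi's compute-mfcc-feats defaults.
def _example_mfcc_usage():
    import torchaudio
    waveform, sample_rate = torchaudio.load("speech.wav")   # hypothetical input file
    feats = mfcc(waveform, num_ceps=13, num_mel_bins=23, sample_frequency=float(sample_rate))
    # feats: shape (m, 13); the first coefficient is C0 unless use_energy=True
    return feats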
+ + +
[docs]@deprecated("Please use `torchaudio.functional.resample`.", "0.10") +def resample_waveform(waveform: Tensor, + orig_freq: float, + new_freq: float, + lowpass_filter_width: int = 6, + rolloff: float = 0.99, + resampling_method: str = "sinc_interpolation") -> Tensor: + r"""Resamples the waveform at the new frequency. + + This is a wrapper around ``torchaudio.functional.resample``. + + Args: + waveform (Tensor): The input signal of size (..., time) + orig_freq (float): The original frequency of the signal + new_freq (float): The desired frequency + lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper + but less efficient. We suggest around 4 to 10 for normal use. (Default: ``6``) + rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist. + Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``) + resampling_method (str, optional): The resampling method to use. + Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``) + + Returns: + Tensor: The waveform at the new frequency + """ + return torchaudio.functional.resample(waveform, orig_freq, new_freq, lowpass_filter_width, + rolloff, resampling_method)
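# --- Illustrative sketch (not part of the original torchaudio source) ---
# ``resample_waveform`` above is deprecated; a minimal example of the suggested
# replacement, ``torchaudio.functional.resample``, using a hypothetical input file.
def _example_resample_usage():
    import torchaudio
    waveform, sample_rate = torchaudio.load("speech.wav")   # hypothetical input file
    resampled = torchaudio.functional.resample(waveform, orig_freq=sample_rate, new_freq=8000)
    return resampled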
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/datasets/cmuarctic.html b/0.9.0/_modules/torchaudio/datasets/cmuarctic.html new file mode 100644 index 0000000000..7cac41051b --- /dev/null +++ b/0.9.0/_modules/torchaudio/datasets/cmuarctic.html @@ -0,0 +1,796 @@ + + + + + + + + + + + + torchaudio.datasets.cmuarctic — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchaudio.datasets.cmuarctic

+import os
+import csv
+from pathlib import Path
+from typing import Tuple, Union
+
+import torchaudio
+from torch import Tensor
+from torch.utils.data import Dataset
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+URL = "aew"
+FOLDER_IN_ARCHIVE = "ARCTIC"
+_CHECKSUMS = {
+    "http://festvox.org/cmu_arctic/packed/cmu_us_aew_arctic.tar.bz2":
+    "4382b116efcc8339c37e01253cb56295",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_ahw_arctic.tar.bz2":
+    "b072d6e961e3f36a2473042d097d6da9",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_aup_arctic.tar.bz2":
+    "5301c7aee8919d2abd632e2667adfa7f",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_awb_arctic.tar.bz2":
+    "280fdff1e9857119d9a2c57b50e12db7",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_axb_arctic.tar.bz2":
+    "5e21cb26c6529c533df1d02ccde5a186",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_bdl_arctic.tar.bz2":
+    "b2c3e558f656af2e0a65da0ac0c3377a",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_clb_arctic.tar.bz2":
+    "3957c503748e3ce17a3b73c1b9861fb0",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_eey_arctic.tar.bz2":
+    "59708e932d27664f9eda3e8e6859969b",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_fem_arctic.tar.bz2":
+    "dba4f992ff023347c07c304bf72f4c73",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_gka_arctic.tar.bz2":
+    "24a876ea7335c1b0ff21460e1241340f",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_jmk_arctic.tar.bz2":
+    "afb69d95f02350537e8a28df5ab6004b",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_ksp_arctic.tar.bz2":
+    "4ce5b3b91a0a54b6b685b1b05aa0b3be",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_ljm_arctic.tar.bz2":
+    "6f45a3b2c86a4ed0465b353be291f77d",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_lnh_arctic.tar.bz2":
+    "c6a15abad5c14d27f4ee856502f0232f",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_rms_arctic.tar.bz2":
+    "71072c983df1e590d9e9519e2a621f6e",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_rxr_arctic.tar.bz2":
+    "3771ff03a2f5b5c3b53aa0a68b9ad0d5",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_slp_arctic.tar.bz2":
+    "9cbf984a832ea01b5058ba9a96862850",
+    "http://festvox.org/cmu_arctic/packed/cmu_us_slt_arctic.tar.bz2":
+    "959eecb2cbbc4ac304c6b92269380c81",
+}
+
+
+def load_cmuarctic_item(line: str,
+                        path: str,
+                        folder_audio: str,
+                        ext_audio: str) -> Tuple[Tensor, int, str, str]:
+
+    utterance_id, utterance = line[0].strip().split(" ", 2)[1:]
+
+    # Remove space, double quote, and single parenthesis from utterance
+    utterance = utterance[1:-3]
+
+    file_audio = os.path.join(path, folder_audio, utterance_id + ext_audio)
+
+    # Load audio
+    waveform, sample_rate = torchaudio.load(file_audio)
+
+    return (
+        waveform,
+        sample_rate,
+        utterance,
+        utterance_id.split("_")[1]
+    )
+
+
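# --- Illustrative sketch (not part of the original torchaudio source) ---
# The parsing in ``load_cmuarctic_item`` assumes festvox-style ``txt.done.data``
# entries; the sample line below is hypothetical but follows that format.
def _example_parse_arctic_line():
    line = ['( arctic_a0001 "Author of the danger trail, Philip Steels, etc." )']
    utterance_id, utterance = line[0].strip().split(" ", 2)[1:]
    utterance = utterance[1:-3]   # strip the opening quote and the trailing '" )'
    return utterance_id, utterance   # ('arctic_a0001', 'Author of the danger trail, Philip Steels, etc.')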
+
[docs]class CMUARCTIC(Dataset): + """Create a Dataset for CMU_ARCTIC. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): + The URL to download the dataset from or the type of the dataset to dowload. + (default: ``"aew"``) + Allowed type values are ``"aew"``, ``"ahw"``, ``"aup"``, ``"awb"``, ``"axb"``, ``"bdl"``, + ``"clb"``, ``"eey"``, ``"fem"``, ``"gka"``, ``"jmk"``, ``"ksp"``, ``"ljm"``, ``"lnh"``, + ``"rms"``, ``"rxr"``, ``"slp"`` or ``"slt"``. + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"ARCTIC"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + """ + + _file_text = "txt.done.data" + _folder_text = "etc" + _ext_audio = ".wav" + _folder_audio = "wav" + + def __init__(self, + root: Union[str, Path], + url: str = URL, + folder_in_archive: str = FOLDER_IN_ARCHIVE, + download: bool = False) -> None: + + if url in [ + "aew", + "ahw", + "aup", + "awb", + "axb", + "bdl", + "clb", + "eey", + "fem", + "gka", + "jmk", + "ksp", + "ljm", + "lnh", + "rms", + "rxr", + "slp", + "slt" + ]: + + url = "cmu_us_" + url + "_arctic" + ext_archive = ".tar.bz2" + base_url = "http://www.festvox.org/cmu_arctic/packed/" + + url = os.path.join(base_url, url + ext_archive) + + # Get string representation of 'root' in case Path object is passed + root = os.fspath(root) + + basename = os.path.basename(url) + root = os.path.join(root, folder_in_archive) + if not os.path.isdir(root): + os.mkdir(root) + archive = os.path.join(root, basename) + + basename = basename.split(".")[0] + + self._path = os.path.join(root, basename) + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _CHECKSUMS.get(url, None) + download_url(url, root, hash_value=checksum, hash_type="md5") + extract_archive(archive) + + self._text = os.path.join(self._path, self._folder_text, self._file_text) + + with open(self._text, "r") as text: + walker = csv.reader(text, delimiter="\n") + self._walker = list(walker) + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, utterance, utterance_id)`` + """ + line = self._walker[n] + return load_cmuarctic_item(line, self._path, self._folder_audio, self._ext_audio)
+ + def __len__(self) -> int: + return len(self._walker)
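# --- Illustrative sketch (not part of the original torchaudio source) ---
# A hedged usage example for the dataset above; the root directory is hypothetical
# and download=True fetches the default "aew" speaker archive.
def _example_cmuarctic_usage():
    dataset = CMUARCTIC("/path/to/data", url="aew", download=True)   # hypothetical root
    waveform, sample_rate, utterance, utterance_id = dataset[0]
    return waveform.shape, sample_rate, utterance, utterance_id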
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/datasets/commonvoice.html b/0.9.0/_modules/torchaudio/datasets/commonvoice.html new file mode 100644 index 0000000000..ed847cbccd --- /dev/null +++ b/0.9.0/_modules/torchaudio/datasets/commonvoice.html @@ -0,0 +1,699 @@ + + + + + + + + + + + + torchaudio.datasets.commonvoice — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchaudio.datasets.commonvoice

+import csv
+import os
+from pathlib import Path
+from typing import List, Dict, Tuple, Union
+
+from torch import Tensor
+from torch.utils.data import Dataset
+
+import torchaudio
+
+
+def load_commonvoice_item(line: List[str],
+                          header: List[str],
+                          path: str,
+                          folder_audio: str,
+                          ext_audio: str) -> Tuple[Tensor, int, Dict[str, str]]:
+    # Each line has the following data:
+    # client_id, path, sentence, up_votes, down_votes, age, gender, accent
+
+    assert header[1] == "path"
+    fileid = line[1]
+    filename = os.path.join(path, folder_audio, fileid)
+    if not filename.endswith(ext_audio):
+        filename += ext_audio
+    waveform, sample_rate = torchaudio.load(filename)
+
+    dic = dict(zip(header, line))
+
+    return waveform, sample_rate, dic
+
+
+
[docs]class COMMONVOICE(Dataset): + """Create a Dataset for CommonVoice. + + Args: + root (str or Path): Path to the directory where the dataset is located. + (Where the ``tsv`` file is present.) + tsv (str, optional): + The name of the tsv file used to construct the metadata, such as + ``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``, + ``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``) + """ + + _ext_txt = ".txt" + _ext_audio = ".mp3" + _folder_audio = "clips" + + def __init__(self, + root: Union[str, Path], + tsv: str = "train.tsv") -> None: + + # Get string representation of 'root' in case Path object is passed + self._path = os.fspath(root) + self._tsv = os.path.join(self._path, tsv) + + with open(self._tsv, "r") as tsv_: + walker = csv.reader(tsv_, delimiter="\t") + self._header = next(walker) + self._walker = list(walker) + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, dictionary)``, where dictionary is built + from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``, + ``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``. + """ + line = self._walker[n] + return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
+ + def __len__(self) -> int: + return len(self._walker)
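# --- Illustrative sketch (not part of the original torchaudio source) ---
# A hedged usage example; the root directory is hypothetical and must already
# contain an extracted CommonVoice release (the tsv files plus the clips/ folder).
def _example_commonvoice_usage():
    dataset = COMMONVOICE("/path/to/CommonVoice/en", tsv="train.tsv")   # hypothetical root
    waveform, sample_rate, metadata = dataset[0]
    return metadata["sentence"]   # transcript of the first clip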
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/datasets/gtzan.html b/0.9.0/_modules/torchaudio/datasets/gtzan.html new file mode 100644 index 0000000000..0001351d77 --- /dev/null +++ b/0.9.0/_modules/torchaudio/datasets/gtzan.html @@ -0,0 +1,1736 @@ + + + + + + + + + + + + torchaudio.datasets.gtzan — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchaudio.datasets.gtzan

+import os
+from pathlib import Path
+from typing import Tuple, Optional, Union
+
+import torchaudio
+from torch import Tensor
+from torch.utils.data import Dataset
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+# The following lists prefixed with `filtered_` provide a filtered split
+# that:
+#
+# a. Mitigates a known issue with GTZAN (duplication)
+#
+# b. Provides a standard split for testing it against other
+#    methods (e.g. the one in jordipons/sklearn-audio-transfer-learning).
+#
+# Those are used when GTZAN is initialised with the `subset` keyword.
+# The split was taken from (github) jordipons/sklearn-audio-transfer-learning.
+
+gtzan_genres = [
+    "blues",
+    "classical",
+    "country",
+    "disco",
+    "hiphop",
+    "jazz",
+    "metal",
+    "pop",
+    "reggae",
+    "rock",
+]
+
+filtered_test = [
+    "blues.00012",
+    "blues.00013",
+    "blues.00014",
+    "blues.00015",
+    "blues.00016",
+    "blues.00017",
+    "blues.00018",
+    "blues.00019",
+    "blues.00020",
+    "blues.00021",
+    "blues.00022",
+    "blues.00023",
+    "blues.00024",
+    "blues.00025",
+    "blues.00026",
+    "blues.00027",
+    "blues.00028",
+    "blues.00061",
+    "blues.00062",
+    "blues.00063",
+    "blues.00064",
+    "blues.00065",
+    "blues.00066",
+    "blues.00067",
+    "blues.00068",
+    "blues.00069",
+    "blues.00070",
+    "blues.00071",
+    "blues.00072",
+    "blues.00098",
+    "blues.00099",
+    "classical.00011",
+    "classical.00012",
+    "classical.00013",
+    "classical.00014",
+    "classical.00015",
+    "classical.00016",
+    "classical.00017",
+    "classical.00018",
+    "classical.00019",
+    "classical.00020",
+    "classical.00021",
+    "classical.00022",
+    "classical.00023",
+    "classical.00024",
+    "classical.00025",
+    "classical.00026",
+    "classical.00027",
+    "classical.00028",
+    "classical.00029",
+    "classical.00034",
+    "classical.00035",
+    "classical.00036",
+    "classical.00037",
+    "classical.00038",
+    "classical.00039",
+    "classical.00040",
+    "classical.00041",
+    "classical.00049",
+    "classical.00077",
+    "classical.00078",
+    "classical.00079",
+    "country.00030",
+    "country.00031",
+    "country.00032",
+    "country.00033",
+    "country.00034",
+    "country.00035",
+    "country.00036",
+    "country.00037",
+    "country.00038",
+    "country.00039",
+    "country.00040",
+    "country.00043",
+    "country.00044",
+    "country.00046",
+    "country.00047",
+    "country.00048",
+    "country.00050",
+    "country.00051",
+    "country.00053",
+    "country.00054",
+    "country.00055",
+    "country.00056",
+    "country.00057",
+    "country.00058",
+    "country.00059",
+    "country.00060",
+    "country.00061",
+    "country.00062",
+    "country.00063",
+    "country.00064",
+    "disco.00001",
+    "disco.00021",
+    "disco.00058",
+    "disco.00062",
+    "disco.00063",
+    "disco.00064",
+    "disco.00065",
+    "disco.00066",
+    "disco.00069",
+    "disco.00076",
+    "disco.00077",
+    "disco.00078",
+    "disco.00079",
+    "disco.00080",
+    "disco.00081",
+    "disco.00082",
+    "disco.00083",
+    "disco.00084",
+    "disco.00085",
+    "disco.00086",
+    "disco.00087",
+    "disco.00088",
+    "disco.00091",
+    "disco.00092",
+    "disco.00093",
+    "disco.00094",
+    "disco.00096",
+    "disco.00097",
+    "disco.00099",
+    "hiphop.00000",
+    "hiphop.00026",
+    "hiphop.00027",
+    "hiphop.00030",
+    "hiphop.00040",
+    "hiphop.00043",
+    "hiphop.00044",
+    "hiphop.00045",
+    "hiphop.00051",
+    "hiphop.00052",
+    "hiphop.00053",
+    "hiphop.00054",
+    "hiphop.00062",
+    "hiphop.00063",
+    "hiphop.00064",
+    "hiphop.00065",
+    "hiphop.00066",
+    "hiphop.00067",
+    "hiphop.00068",
+    "hiphop.00069",
+    "hiphop.00070",
+    "hiphop.00071",
+    "hiphop.00072",
+    "hiphop.00073",
+    "hiphop.00074",
+    "hiphop.00075",
+    "hiphop.00099",
+    "jazz.00073",
+    "jazz.00074",
+    "jazz.00075",
+    "jazz.00076",
+    "jazz.00077",
+    "jazz.00078",
+    "jazz.00079",
+    "jazz.00080",
+    "jazz.00081",
+    "jazz.00082",
+    "jazz.00083",
+    "jazz.00084",
+    "jazz.00085",
+    "jazz.00086",
+    "jazz.00087",
+    "jazz.00088",
+    "jazz.00089",
+    "jazz.00090",
+    "jazz.00091",
+    "jazz.00092",
+    "jazz.00093",
+    "jazz.00094",
+    "jazz.00095",
+    "jazz.00096",
+    "jazz.00097",
+    "jazz.00098",
+    "jazz.00099",
+    "metal.00012",
+    "metal.00013",
+    "metal.00014",
+    "metal.00015",
+    "metal.00022",
+    "metal.00023",
+    "metal.00025",
+    "metal.00026",
+    "metal.00027",
+    "metal.00028",
+    "metal.00029",
+    "metal.00030",
+    "metal.00031",
+    "metal.00032",
+    "metal.00033",
+    "metal.00038",
+    "metal.00039",
+    "metal.00067",
+    "metal.00070",
+    "metal.00073",
+    "metal.00074",
+    "metal.00075",
+    "metal.00078",
+    "metal.00083",
+    "metal.00085",
+    "metal.00087",
+    "metal.00088",
+    "pop.00000",
+    "pop.00001",
+    "pop.00013",
+    "pop.00014",
+    "pop.00043",
+    "pop.00063",
+    "pop.00064",
+    "pop.00065",
+    "pop.00066",
+    "pop.00069",
+    "pop.00070",
+    "pop.00071",
+    "pop.00072",
+    "pop.00073",
+    "pop.00074",
+    "pop.00075",
+    "pop.00076",
+    "pop.00077",
+    "pop.00078",
+    "pop.00079",
+    "pop.00082",
+    "pop.00088",
+    "pop.00089",
+    "pop.00090",
+    "pop.00091",
+    "pop.00092",
+    "pop.00093",
+    "pop.00094",
+    "pop.00095",
+    "pop.00096",
+    "reggae.00034",
+    "reggae.00035",
+    "reggae.00036",
+    "reggae.00037",
+    "reggae.00038",
+    "reggae.00039",
+    "reggae.00040",
+    "reggae.00046",
+    "reggae.00047",
+    "reggae.00048",
+    "reggae.00052",
+    "reggae.00053",
+    "reggae.00064",
+    "reggae.00065",
+    "reggae.00066",
+    "reggae.00067",
+    "reggae.00068",
+    "reggae.00071",
+    "reggae.00079",
+    "reggae.00082",
+    "reggae.00083",
+    "reggae.00084",
+    "reggae.00087",
+    "reggae.00088",
+    "reggae.00089",
+    "reggae.00090",
+    "rock.00010",
+    "rock.00011",
+    "rock.00012",
+    "rock.00013",
+    "rock.00014",
+    "rock.00015",
+    "rock.00027",
+    "rock.00028",
+    "rock.00029",
+    "rock.00030",
+    "rock.00031",
+    "rock.00032",
+    "rock.00033",
+    "rock.00034",
+    "rock.00035",
+    "rock.00036",
+    "rock.00037",
+    "rock.00039",
+    "rock.00040",
+    "rock.00041",
+    "rock.00042",
+    "rock.00043",
+    "rock.00044",
+    "rock.00045",
+    "rock.00046",
+    "rock.00047",
+    "rock.00048",
+    "rock.00086",
+    "rock.00087",
+    "rock.00088",
+    "rock.00089",
+    "rock.00090",
+]
+
+filtered_train = [
+    "blues.00029",
+    "blues.00030",
+    "blues.00031",
+    "blues.00032",
+    "blues.00033",
+    "blues.00034",
+    "blues.00035",
+    "blues.00036",
+    "blues.00037",
+    "blues.00038",
+    "blues.00039",
+    "blues.00040",
+    "blues.00041",
+    "blues.00042",
+    "blues.00043",
+    "blues.00044",
+    "blues.00045",
+    "blues.00046",
+    "blues.00047",
+    "blues.00048",
+    "blues.00049",
+    "blues.00073",
+    "blues.00074",
+    "blues.00075",
+    "blues.00076",
+    "blues.00077",
+    "blues.00078",
+    "blues.00079",
+    "blues.00080",
+    "blues.00081",
+    "blues.00082",
+    "blues.00083",
+    "blues.00084",
+    "blues.00085",
+    "blues.00086",
+    "blues.00087",
+    "blues.00088",
+    "blues.00089",
+    "blues.00090",
+    "blues.00091",
+    "blues.00092",
+    "blues.00093",
+    "blues.00094",
+    "blues.00095",
+    "blues.00096",
+    "blues.00097",
+    "classical.00030",
+    "classical.00031",
+    "classical.00032",
+    "classical.00033",
+    "classical.00043",
+    "classical.00044",
+    "classical.00045",
+    "classical.00046",
+    "classical.00047",
+    "classical.00048",
+    "classical.00050",
+    "classical.00051",
+    "classical.00052",
+    "classical.00053",
+    "classical.00054",
+    "classical.00055",
+    "classical.00056",
+    "classical.00057",
+    "classical.00058",
+    "classical.00059",
+    "classical.00060",
+    "classical.00061",
+    "classical.00062",
+    "classical.00063",
+    "classical.00064",
+    "classical.00065",
+    "classical.00066",
+    "classical.00067",
+    "classical.00080",
+    "classical.00081",
+    "classical.00082",
+    "classical.00083",
+    "classical.00084",
+    "classical.00085",
+    "classical.00086",
+    "classical.00087",
+    "classical.00088",
+    "classical.00089",
+    "classical.00090",
+    "classical.00091",
+    "classical.00092",
+    "classical.00093",
+    "classical.00094",
+    "classical.00095",
+    "classical.00096",
+    "classical.00097",
+    "classical.00098",
+    "classical.00099",
+    "country.00019",
+    "country.00020",
+    "country.00021",
+    "country.00022",
+    "country.00023",
+    "country.00024",
+    "country.00025",
+    "country.00026",
+    "country.00028",
+    "country.00029",
+    "country.00065",
+    "country.00066",
+    "country.00067",
+    "country.00068",
+    "country.00069",
+    "country.00070",
+    "country.00071",
+    "country.00072",
+    "country.00073",
+    "country.00074",
+    "country.00075",
+    "country.00076",
+    "country.00077",
+    "country.00078",
+    "country.00079",
+    "country.00080",
+    "country.00081",
+    "country.00082",
+    "country.00083",
+    "country.00084",
+    "country.00085",
+    "country.00086",
+    "country.00087",
+    "country.00088",
+    "country.00089",
+    "country.00090",
+    "country.00091",
+    "country.00092",
+    "country.00093",
+    "country.00094",
+    "country.00095",
+    "country.00096",
+    "country.00097",
+    "country.00098",
+    "country.00099",
+    "disco.00005",
+    "disco.00015",
+    "disco.00016",
+    "disco.00017",
+    "disco.00018",
+    "disco.00019",
+    "disco.00020",
+    "disco.00022",
+    "disco.00023",
+    "disco.00024",
+    "disco.00025",
+    "disco.00026",
+    "disco.00027",
+    "disco.00028",
+    "disco.00029",
+    "disco.00030",
+    "disco.00031",
+    "disco.00032",
+    "disco.00033",
+    "disco.00034",
+    "disco.00035",
+    "disco.00036",
+    "disco.00037",
+    "disco.00039",
+    "disco.00040",
+    "disco.00041",
+    "disco.00042",
+    "disco.00043",
+    "disco.00044",
+    "disco.00045",
+    "disco.00047",
+    "disco.00049",
+    "disco.00053",
+    "disco.00054",
+    "disco.00056",
+    "disco.00057",
+    "disco.00059",
+    "disco.00061",
+    "disco.00070",
+    "disco.00073",
+    "disco.00074",
+    "disco.00089",
+    "hiphop.00002",
+    "hiphop.00003",
+    "hiphop.00004",
+    "hiphop.00005",
+    "hiphop.00006",
+    "hiphop.00007",
+    "hiphop.00008",
+    "hiphop.00009",
+    "hiphop.00010",
+    "hiphop.00011",
+    "hiphop.00012",
+    "hiphop.00013",
+    "hiphop.00014",
+    "hiphop.00015",
+    "hiphop.00016",
+    "hiphop.00017",
+    "hiphop.00018",
+    "hiphop.00019",
+    "hiphop.00020",
+    "hiphop.00021",
+    "hiphop.00022",
+    "hiphop.00023",
+    "hiphop.00024",
+    "hiphop.00025",
+    "hiphop.00028",
+    "hiphop.00029",
+    "hiphop.00031",
+    "hiphop.00032",
+    "hiphop.00033",
+    "hiphop.00034",
+    "hiphop.00035",
+    "hiphop.00036",
+    "hiphop.00037",
+    "hiphop.00038",
+    "hiphop.00041",
+    "hiphop.00042",
+    "hiphop.00055",
+    "hiphop.00056",
+    "hiphop.00057",
+    "hiphop.00058",
+    "hiphop.00059",
+    "hiphop.00060",
+    "hiphop.00061",
+    "hiphop.00077",
+    "hiphop.00078",
+    "hiphop.00079",
+    "hiphop.00080",
+    "jazz.00000",
+    "jazz.00001",
+    "jazz.00011",
+    "jazz.00012",
+    "jazz.00013",
+    "jazz.00014",
+    "jazz.00015",
+    "jazz.00016",
+    "jazz.00017",
+    "jazz.00018",
+    "jazz.00019",
+    "jazz.00020",
+    "jazz.00021",
+    "jazz.00022",
+    "jazz.00023",
+    "jazz.00024",
+    "jazz.00041",
+    "jazz.00047",
+    "jazz.00048",
+    "jazz.00049",
+    "jazz.00050",
+    "jazz.00051",
+    "jazz.00052",
+    "jazz.00053",
+    "jazz.00054",
+    "jazz.00055",
+    "jazz.00056",
+    "jazz.00057",
+    "jazz.00058",
+    "jazz.00059",
+    "jazz.00060",
+    "jazz.00061",
+    "jazz.00062",
+    "jazz.00063",
+    "jazz.00064",
+    "jazz.00065",
+    "jazz.00066",
+    "jazz.00067",
+    "jazz.00068",
+    "jazz.00069",
+    "jazz.00070",
+    "jazz.00071",
+    "jazz.00072",
+    "metal.00002",
+    "metal.00003",
+    "metal.00005",
+    "metal.00021",
+    "metal.00024",
+    "metal.00035",
+    "metal.00046",
+    "metal.00047",
+    "metal.00048",
+    "metal.00049",
+    "metal.00050",
+    "metal.00051",
+    "metal.00052",
+    "metal.00053",
+    "metal.00054",
+    "metal.00055",
+    "metal.00056",
+    "metal.00057",
+    "metal.00059",
+    "metal.00060",
+    "metal.00061",
+    "metal.00062",
+    "metal.00063",
+    "metal.00064",
+    "metal.00065",
+    "metal.00066",
+    "metal.00069",
+    "metal.00071",
+    "metal.00072",
+    "metal.00079",
+    "metal.00080",
+    "metal.00084",
+    "metal.00086",
+    "metal.00089",
+    "metal.00090",
+    "metal.00091",
+    "metal.00092",
+    "metal.00093",
+    "metal.00094",
+    "metal.00095",
+    "metal.00096",
+    "metal.00097",
+    "metal.00098",
+    "metal.00099",
+    "pop.00002",
+    "pop.00003",
+    "pop.00004",
+    "pop.00005",
+    "pop.00006",
+    "pop.00007",
+    "pop.00008",
+    "pop.00009",
+    "pop.00011",
+    "pop.00012",
+    "pop.00016",
+    "pop.00017",
+    "pop.00018",
+    "pop.00019",
+    "pop.00020",
+    "pop.00023",
+    "pop.00024",
+    "pop.00025",
+    "pop.00026",
+    "pop.00027",
+    "pop.00028",
+    "pop.00029",
+    "pop.00031",
+    "pop.00032",
+    "pop.00033",
+    "pop.00034",
+    "pop.00035",
+    "pop.00036",
+    "pop.00038",
+    "pop.00039",
+    "pop.00040",
+    "pop.00041",
+    "pop.00042",
+    "pop.00044",
+    "pop.00046",
+    "pop.00049",
+    "pop.00050",
+    "pop.00080",
+    "pop.00097",
+    "pop.00098",
+    "pop.00099",
+    "reggae.00000",
+    "reggae.00001",
+    "reggae.00002",
+    "reggae.00004",
+    "reggae.00006",
+    "reggae.00009",
+    "reggae.00011",
+    "reggae.00012",
+    "reggae.00014",
+    "reggae.00015",
+    "reggae.00016",
+    "reggae.00017",
+    "reggae.00018",
+    "reggae.00019",
+    "reggae.00020",
+    "reggae.00021",
+    "reggae.00022",
+    "reggae.00023",
+    "reggae.00024",
+    "reggae.00025",
+    "reggae.00026",
+    "reggae.00027",
+    "reggae.00028",
+    "reggae.00029",
+    "reggae.00030",
+    "reggae.00031",
+    "reggae.00032",
+    "reggae.00042",
+    "reggae.00043",
+    "reggae.00044",
+    "reggae.00045",
+    "reggae.00049",
+    "reggae.00050",
+    "reggae.00051",
+    "reggae.00054",
+    "reggae.00055",
+    "reggae.00056",
+    "reggae.00057",
+    "reggae.00058",
+    "reggae.00059",
+    "reggae.00060",
+    "reggae.00063",
+    "reggae.00069",
+    "rock.00000",
+    "rock.00001",
+    "rock.00002",
+    "rock.00003",
+    "rock.00004",
+    "rock.00005",
+    "rock.00006",
+    "rock.00007",
+    "rock.00008",
+    "rock.00009",
+    "rock.00016",
+    "rock.00017",
+    "rock.00018",
+    "rock.00019",
+    "rock.00020",
+    "rock.00021",
+    "rock.00022",
+    "rock.00023",
+    "rock.00024",
+    "rock.00025",
+    "rock.00026",
+    "rock.00057",
+    "rock.00058",
+    "rock.00059",
+    "rock.00060",
+    "rock.00061",
+    "rock.00062",
+    "rock.00063",
+    "rock.00064",
+    "rock.00065",
+    "rock.00066",
+    "rock.00067",
+    "rock.00068",
+    "rock.00069",
+    "rock.00070",
+    "rock.00091",
+    "rock.00092",
+    "rock.00093",
+    "rock.00094",
+    "rock.00095",
+    "rock.00096",
+    "rock.00097",
+    "rock.00098",
+    "rock.00099",
+]
+
+filtered_valid = [
+    "blues.00000",
+    "blues.00001",
+    "blues.00002",
+    "blues.00003",
+    "blues.00004",
+    "blues.00005",
+    "blues.00006",
+    "blues.00007",
+    "blues.00008",
+    "blues.00009",
+    "blues.00010",
+    "blues.00011",
+    "blues.00050",
+    "blues.00051",
+    "blues.00052",
+    "blues.00053",
+    "blues.00054",
+    "blues.00055",
+    "blues.00056",
+    "blues.00057",
+    "blues.00058",
+    "blues.00059",
+    "blues.00060",
+    "classical.00000",
+    "classical.00001",
+    "classical.00002",
+    "classical.00003",
+    "classical.00004",
+    "classical.00005",
+    "classical.00006",
+    "classical.00007",
+    "classical.00008",
+    "classical.00009",
+    "classical.00010",
+    "classical.00068",
+    "classical.00069",
+    "classical.00070",
+    "classical.00071",
+    "classical.00072",
+    "classical.00073",
+    "classical.00074",
+    "classical.00075",
+    "classical.00076",
+    "country.00000",
+    "country.00001",
+    "country.00002",
+    "country.00003",
+    "country.00004",
+    "country.00005",
+    "country.00006",
+    "country.00007",
+    "country.00009",
+    "country.00010",
+    "country.00011",
+    "country.00012",
+    "country.00013",
+    "country.00014",
+    "country.00015",
+    "country.00016",
+    "country.00017",
+    "country.00018",
+    "country.00027",
+    "country.00041",
+    "country.00042",
+    "country.00045",
+    "country.00049",
+    "disco.00000",
+    "disco.00002",
+    "disco.00003",
+    "disco.00004",
+    "disco.00006",
+    "disco.00007",
+    "disco.00008",
+    "disco.00009",
+    "disco.00010",
+    "disco.00011",
+    "disco.00012",
+    "disco.00013",
+    "disco.00014",
+    "disco.00046",
+    "disco.00048",
+    "disco.00052",
+    "disco.00067",
+    "disco.00068",
+    "disco.00072",
+    "disco.00075",
+    "disco.00090",
+    "disco.00095",
+    "hiphop.00081",
+    "hiphop.00082",
+    "hiphop.00083",
+    "hiphop.00084",
+    "hiphop.00085",
+    "hiphop.00086",
+    "hiphop.00087",
+    "hiphop.00088",
+    "hiphop.00089",
+    "hiphop.00090",
+    "hiphop.00091",
+    "hiphop.00092",
+    "hiphop.00093",
+    "hiphop.00094",
+    "hiphop.00095",
+    "hiphop.00096",
+    "hiphop.00097",
+    "hiphop.00098",
+    "jazz.00002",
+    "jazz.00003",
+    "jazz.00004",
+    "jazz.00005",
+    "jazz.00006",
+    "jazz.00007",
+    "jazz.00008",
+    "jazz.00009",
+    "jazz.00010",
+    "jazz.00025",
+    "jazz.00026",
+    "jazz.00027",
+    "jazz.00028",
+    "jazz.00029",
+    "jazz.00030",
+    "jazz.00031",
+    "jazz.00032",
+    "metal.00000",
+    "metal.00001",
+    "metal.00006",
+    "metal.00007",
+    "metal.00008",
+    "metal.00009",
+    "metal.00010",
+    "metal.00011",
+    "metal.00016",
+    "metal.00017",
+    "metal.00018",
+    "metal.00019",
+    "metal.00020",
+    "metal.00036",
+    "metal.00037",
+    "metal.00068",
+    "metal.00076",
+    "metal.00077",
+    "metal.00081",
+    "metal.00082",
+    "pop.00010",
+    "pop.00053",
+    "pop.00055",
+    "pop.00058",
+    "pop.00059",
+    "pop.00060",
+    "pop.00061",
+    "pop.00062",
+    "pop.00081",
+    "pop.00083",
+    "pop.00084",
+    "pop.00085",
+    "pop.00086",
+    "reggae.00061",
+    "reggae.00062",
+    "reggae.00070",
+    "reggae.00072",
+    "reggae.00074",
+    "reggae.00076",
+    "reggae.00077",
+    "reggae.00078",
+    "reggae.00085",
+    "reggae.00092",
+    "reggae.00093",
+    "reggae.00094",
+    "reggae.00095",
+    "reggae.00096",
+    "reggae.00097",
+    "reggae.00098",
+    "reggae.00099",
+    "rock.00038",
+    "rock.00049",
+    "rock.00050",
+    "rock.00051",
+    "rock.00052",
+    "rock.00053",
+    "rock.00054",
+    "rock.00055",
+    "rock.00056",
+    "rock.00071",
+    "rock.00072",
+    "rock.00073",
+    "rock.00074",
+    "rock.00075",
+    "rock.00076",
+    "rock.00077",
+    "rock.00078",
+    "rock.00079",
+    "rock.00080",
+    "rock.00081",
+    "rock.00082",
+    "rock.00083",
+    "rock.00084",
+    "rock.00085",
+]
+
+
+URL = "http://opihi.cs.uvic.ca/sound/genres.tar.gz"
+FOLDER_IN_ARCHIVE = "genres"
+_CHECKSUMS = {
+    "http://opihi.cs.uvic.ca/sound/genres.tar.gz": "5b3d6dddb579ab49814ab86dba69e7c7"
+}
+
+
+def load_gtzan_item(fileid: str, path: str, ext_audio: str) -> Tuple[Tensor, str]:
+    """
+    Loads a file from the dataset and returns the raw waveform
+    as a Torch Tensor, its sample rate as an integer, and its
+    genre as a string.
+    """
+    # Filenames are of the form label.id, e.g. blues.00078
+    label, _ = fileid.split(".")
+
+    # Read wav
+    file_audio = os.path.join(path, label, fileid + ext_audio)
+    waveform, sample_rate = torchaudio.load(file_audio)
+
+    return waveform, sample_rate, label
+
+
+
[docs]class GTZAN(Dataset): + """Create a Dataset for GTZAN. + + Note: + Please see http://marsyas.info/downloads/datasets.html if you are planning to use + this dataset to publish results. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from. + (default: ``"http://opihi.cs.uvic.ca/sound/genres.tar.gz"``) + folder_in_archive (str, optional): The top-level directory of the dataset. + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + subset (str, optional): Which subset of the dataset to use. + One of ``"training"``, ``"validation"``, ``"testing"`` or ``None``. + If ``None``, the entire dataset is used. (default: ``None``). + """ + + _ext_audio = ".wav" + + def __init__( + self, + root: Union[str, Path], + url: str = URL, + folder_in_archive: str = FOLDER_IN_ARCHIVE, + download: bool = False, + subset: Optional[str] = None, + ) -> None: + + # super(GTZAN, self).__init__() + + # Get string representation of 'root' in case Path object is passed + root = os.fspath(root) + + self.root = root + self.url = url + self.folder_in_archive = folder_in_archive + self.download = download + self.subset = subset + + assert subset is None or subset in ["training", "validation", "testing"], ( + "When `subset` not None, it must take a value from " + + "{'training', 'validation', 'testing'}." + ) + + archive = os.path.basename(url) + archive = os.path.join(root, archive) + self._path = os.path.join(root, folder_in_archive) + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _CHECKSUMS.get(url, None) + download_url(url, root, hash_value=checksum, hash_type="md5") + extract_archive(archive) + + if not os.path.isdir(self._path): + raise RuntimeError( + "Dataset not found. Please use `download=True` to download it." + ) + + if self.subset is None: + # Check every subdirectory under dataset root + # which has the same name as the genres in + # GTZAN (e.g. `root_dir'/blues/, `root_dir'/rock, etc.) + # This lets users remove or move around song files, + # useful when e.g. they want to use only some of the files + # in a genre or want to label other files with a different + # genre. + self._walker = [] + + root = os.path.expanduser(self._path) + + for directory in gtzan_genres: + fulldir = os.path.join(root, directory) + + if not os.path.exists(fulldir): + continue + + songs_in_genre = os.listdir(fulldir) + songs_in_genre.sort() + for fname in songs_in_genre: + name, ext = os.path.splitext(fname) + if ext.lower() == ".wav" and "." in name: + # Check whether the file is of the form + # `gtzan_genre`.`5 digit number`.wav + genre, num = name.split(".") + if genre in gtzan_genres and len(num) == 5 and num.isdigit(): + self._walker.append(name) + else: + if self.subset == "training": + self._walker = filtered_train + elif self.subset == "validation": + self._walker = filtered_valid + elif self.subset == "testing": + self._walker = filtered_test + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, label)`` + """ + fileid = self._walker[n] + item = load_gtzan_item(fileid, self._path, self._ext_audio) + waveform, sample_rate, label = item + return waveform, sample_rate, label
+ + def __len__(self) -> int: + return len(self._walker)
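# --- Illustrative sketch (not part of the original torchaudio source) ---
# A hedged usage example; the root directory is hypothetical, and subset="training"
# selects the filtered split defined by the lists above.
def _example_gtzan_usage():
    dataset = GTZAN("/path/to/data", download=True, subset="training")   # hypothetical root
    waveform, sample_rate, label = dataset[0]
    return waveform.shape, sample_rate, label   # label is a genre string such as "blues"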
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/datasets/librispeech.html b/0.9.0/_modules/torchaudio/datasets/librispeech.html new file mode 100644 index 0000000000..64e5444589 --- /dev/null +++ b/0.9.0/_modules/torchaudio/datasets/librispeech.html @@ -0,0 +1,765 @@ + + + + + + + + + + + + torchaudio.datasets.librispeech — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchaudio.datasets.librispeech

+import os
+from typing import Tuple, Union
+from pathlib import Path
+
+import torchaudio
+from torch import Tensor
+from torch.utils.data import Dataset
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+URL = "train-clean-100"
+FOLDER_IN_ARCHIVE = "LibriSpeech"
+_CHECKSUMS = {
+    "http://www.openslr.org/resources/12/dev-clean.tar.gz":
+    "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3",
+    "http://www.openslr.org/resources/12/dev-other.tar.gz":
+    "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365",
+    "http://www.openslr.org/resources/12/test-clean.tar.gz":
+    "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23",
+    "http://www.openslr.org/resources/12/test-other.tar.gz":
+    "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29",
+    "http://www.openslr.org/resources/12/train-clean-100.tar.gz":
+    "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2",
+    "http://www.openslr.org/resources/12/train-clean-360.tar.gz":
+    "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf",
+    "http://www.openslr.org/resources/12/train-other-500.tar.gz":
+    "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2"
+}
+
+
+def load_librispeech_item(fileid: str,
+                          path: str,
+                          ext_audio: str,
+                          ext_txt: str) -> Tuple[Tensor, int, str, int, int, int]:
+    speaker_id, chapter_id, utterance_id = fileid.split("-")
+
+    file_text = speaker_id + "-" + chapter_id + ext_txt
+    file_text = os.path.join(path, speaker_id, chapter_id, file_text)
+
+    fileid_audio = speaker_id + "-" + chapter_id + "-" + utterance_id
+    file_audio = fileid_audio + ext_audio
+    file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
+
+    # Load audio
+    waveform, sample_rate = torchaudio.load(file_audio)
+
+    # Load text
+    with open(file_text) as ft:
+        for line in ft:
+            fileid_text, utterance = line.strip().split(" ", 1)
+            if fileid_audio == fileid_text:
+                break
+        else:
+            # Transcript not found
+            raise FileNotFoundError("Transcript not found for " + fileid_audio)
+
+    return (
+        waveform,
+        sample_rate,
+        utterance,
+        int(speaker_id),
+        int(chapter_id),
+        int(utterance_id),
+    )
+
+
+
[docs]class LIBRISPEECH(Dataset): + """Create a Dataset for LibriSpeech. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from, + or the type of the dataset to dowload. + Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``, + ``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and + ``"train-other-500"``. (default: ``"train-clean-100"``) + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"LibriSpeech"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + """ + + _ext_txt = ".trans.txt" + _ext_audio = ".flac" + + def __init__(self, + root: Union[str, Path], + url: str = URL, + folder_in_archive: str = FOLDER_IN_ARCHIVE, + download: bool = False) -> None: + + if url in [ + "dev-clean", + "dev-other", + "test-clean", + "test-other", + "train-clean-100", + "train-clean-360", + "train-other-500", + ]: + + ext_archive = ".tar.gz" + base_url = "http://www.openslr.org/resources/12/" + + url = os.path.join(base_url, url + ext_archive) + + # Get string representation of 'root' in case Path object is passed + root = os.fspath(root) + + basename = os.path.basename(url) + archive = os.path.join(root, basename) + + basename = basename.split(".")[0] + folder_in_archive = os.path.join(folder_in_archive, basename) + + self._path = os.path.join(root, folder_in_archive) + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _CHECKSUMS.get(url, None) + download_url(url, root, hash_value=checksum) + extract_archive(archive) + + self._walker = sorted(str(p.stem) for p in Path(self._path).glob('*/*/*' + self._ext_audio)) + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, utterance, speaker_id, chapter_id, utterance_id)`` + """ + fileid = self._walker[n] + return load_librispeech_item(fileid, self._path, self._ext_audio, self._ext_txt)
+ + def __len__(self) -> int: + return len(self._walker)
+
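A minimal usage sketch for the LIBRISPEECH class above. The ./data root, the "test-clean" split and download=True are illustrative assumptions; the returned tuple follows the __getitem__ docstring.

    from torch.utils.data import DataLoader
    from torchaudio.datasets import LIBRISPEECH

    # "test-clean" is one of the split names listed in the docstring;
    # the root directory and download flag are assumptions.
    dataset = LIBRISPEECH("./data", url="test-clean", download=True)
    waveform, sample_rate, utterance, speaker_id, chapter_id, utterance_id = dataset[0]
    print(waveform.shape, sample_rate, utterance[:50])

    # Because the dataset is indexable and sized, it plugs into a standard DataLoader
    # (batch_size=1 avoids collating variable-length waveforms).
    loader = DataLoader(dataset, batch_size=1, shuffle=True)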
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/datasets/libritts.html b/0.9.0/_modules/torchaudio/datasets/libritts.html
new file mode 100644
index 0000000000..7b5a4aab4d
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/datasets/libritts.html
@@ -0,0 +1,773 @@
+ torchaudio.datasets.libritts — Torchaudio 0.9.0a0+33b2469 documentation
Source code for torchaudio.datasets.libritts

+import os
+from typing import Tuple, Union
+from pathlib import Path
+
+import torchaudio
+from torch import Tensor
+from torch.utils.data import Dataset
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+URL = "train-clean-100"
+FOLDER_IN_ARCHIVE = "LibriTTS"
+_CHECKSUMS = {
+    "http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
+    "http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
+    "http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
+    "http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
+    "http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
+    "http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
+    "http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
+}
+
+
+def load_libritts_item(
+    fileid: str,
+    path: str,
+    ext_audio: str,
+    ext_original_txt: str,
+    ext_normalized_txt: str,
+) -> Tuple[Tensor, int, str, str, int, int, str]:
+    speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_")
+    utterance_id = fileid
+
+    normalized_text = utterance_id + ext_normalized_txt
+    normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text)
+
+    original_text = utterance_id + ext_original_txt
+    original_text = os.path.join(path, speaker_id, chapter_id, original_text)
+
+    file_audio = utterance_id + ext_audio
+    file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
+
+    # Load audio
+    waveform, sample_rate = torchaudio.load(file_audio)
+
+    # Load original text
+    with open(original_text) as ft:
+        original_text = ft.readline()
+
+    # Load normalized text
+    with open(normalized_text, "r") as ft:
+        normalized_text = ft.readline()
+
+    return (
+        waveform,
+        sample_rate,
+        original_text,
+        normalized_text,
+        int(speaker_id),
+        int(chapter_id),
+        utterance_id,
+    )
+
+
+
[docs]class LIBRITTS(Dataset): + """Create a Dataset for LibriTTS. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from, + or the type of the dataset to dowload. + Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``, + ``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and + ``"train-other-500"``. (default: ``"train-clean-100"``) + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"LibriTTS"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + """ + + _ext_original_txt = ".original.txt" + _ext_normalized_txt = ".normalized.txt" + _ext_audio = ".wav" + + def __init__( + self, + root: Union[str, Path], + url: str = URL, + folder_in_archive: str = FOLDER_IN_ARCHIVE, + download: bool = False, + ) -> None: + + if url in [ + "dev-clean", + "dev-other", + "test-clean", + "test-other", + "train-clean-100", + "train-clean-360", + "train-other-500", + ]: + + ext_archive = ".tar.gz" + base_url = "http://www.openslr.org/resources/60/" + + url = os.path.join(base_url, url + ext_archive) + + # Get string representation of 'root' in case Path object is passed + root = os.fspath(root) + + basename = os.path.basename(url) + archive = os.path.join(root, basename) + + basename = basename.split(".")[0] + folder_in_archive = os.path.join(folder_in_archive, basename) + + self._path = os.path.join(root, folder_in_archive) + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _CHECKSUMS.get(url, None) + download_url(url, root, hash_value=checksum) + extract_archive(archive) + + self._walker = sorted(str(p.stem) for p in Path(self._path).glob('*/*/*' + self._ext_audio)) + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, original_text, normalized_text, speaker_id, + chapter_id, utterance_id)`` + """ + fileid = self._walker[n] + return load_libritts_item( + fileid, + self._path, + self._ext_audio, + self._ext_original_txt, + self._ext_normalized_txt, + )
+ + def __len__(self) -> int: + return len(self._walker)
+
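A hedged usage sketch for the LIBRITTS class above; the ./data root, the "dev-clean" subset and download=True are assumptions.

    from torchaudio.datasets import LIBRITTS

    dataset = LIBRITTS("./data", url="dev-clean", download=True)
    (waveform, sample_rate, original_text, normalized_text,
     speaker_id, chapter_id, utterance_id) = dataset[0]
    print(sample_rate, normalized_text)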
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/datasets/ljspeech.html b/0.9.0/_modules/torchaudio/datasets/ljspeech.html
new file mode 100644
index 0000000000..61ad946ba4
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/datasets/ljspeech.html
@@ -0,0 +1,711 @@
+ torchaudio.datasets.ljspeech — Torchaudio 0.9.0a0+33b2469 documentation
Source code for torchaudio.datasets.ljspeech

+import os
+import csv
+from typing import Tuple, Union
+from pathlib import Path
+
+import torchaudio
+from torchaudio.datasets.utils import download_url, extract_archive
+from torch import Tensor
+from torch.utils.data import Dataset
+
+_RELEASE_CONFIGS = {
+    "release1": {
+        "folder_in_archive": "wavs",
+        "url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
+        "checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
+    }
+}
+
+
+
[docs]class LJSPEECH(Dataset): + """Create a Dataset for LJSpeech-1.1. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from. + (default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``) + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"wavs"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + """ + + def __init__(self, + root: Union[str, Path], + url: str = _RELEASE_CONFIGS["release1"]["url"], + folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"], + download: bool = False) -> None: + + self._parse_filesystem(root, url, folder_in_archive, download) + + def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None: + root = Path(root) + + basename = os.path.basename(url) + archive = root / basename + + basename = Path(basename.split(".tar.bz2")[0]) + folder_in_archive = basename / folder_in_archive + + self._path = root / folder_in_archive + self._metadata_path = root / basename / 'metadata.csv' + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _RELEASE_CONFIGS["release1"]["checksum"] + download_url(url, root, hash_value=checksum) + extract_archive(archive) + + with open(self._metadata_path, "r", newline='') as metadata: + flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE) + self._flist = list(flist) + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, transcript, normalized_transcript)`` + """ + line = self._flist[n] + fileid, transcript, normalized_transcript = line + fileid_audio = self._path / (fileid + ".wav") + + # Load audio + waveform, sample_rate = torchaudio.load(fileid_audio) + + return ( + waveform, + sample_rate, + transcript, + normalized_transcript, + )
+ + def __len__(self) -> int: + return len(self._flist)
+
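A brief usage sketch for the LJSPEECH class above; the ./data root and download=True are assumptions, and the tuple layout follows the __getitem__ docstring.

    from torchaudio.datasets import LJSPEECH

    dataset = LJSPEECH("./data", download=True)
    waveform, sample_rate, transcript, normalized_transcript = dataset[0]
    print(sample_rate, normalized_transcript)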
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/datasets/speechcommands.html b/0.9.0/_modules/torchaudio/datasets/speechcommands.html
new file mode 100644
index 0000000000..362c1725fd
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/datasets/speechcommands.html
@@ -0,0 +1,770 @@
+ torchaudio.datasets.speechcommands — Torchaudio 0.9.0a0+33b2469 documentation
Source code for torchaudio.datasets.speechcommands

+import os
+from typing import Tuple, Optional, Union
+from pathlib import Path
+
+import torchaudio
+from torch.utils.data import Dataset
+from torch import Tensor
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+FOLDER_IN_ARCHIVE = "SpeechCommands"
+URL = "speech_commands_v0.02"
+HASH_DIVIDER = "_nohash_"
+EXCEPT_FOLDER = "_background_noise_"
+_CHECKSUMS = {
+    "https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz":
+    "3cd23799cb2bbdec517f1cc028f8d43c",
+    "https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz":
+    "6b74f3901214cb2c2934e98196829835",
+}
+
+
+def _load_list(root, *filenames):
+    output = []
+    for filename in filenames:
+        filepath = os.path.join(root, filename)
+        with open(filepath) as fileobj:
+            output += [os.path.normpath(os.path.join(root, line.strip())) for line in fileobj]
+    return output
+
+
+def load_speechcommands_item(filepath: str, path: str) -> Tuple[Tensor, int, str, str, int]:
+    relpath = os.path.relpath(filepath, path)
+    label, filename = os.path.split(relpath)
+    # Besides the officially supported split method for datasets defined by "validation_list.txt"
+    # and "testing_list.txt" over "speech_commands_v0.0x.tar.gz" archives, an alternative split
+    # method referred to in paragraph 2-3 of Section 7.1, references 13 and 14 of the original
+    # paper, and the checksums file from the tensorflow_datasets package [1] is also supported.
+    # Some filenames in those "speech_commands_test_set_v0.0x.tar.gz" archives have the form
+    # "xxx.wav.wav", so the file extension needs to be stripped twice.
+    # [1] https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/url_checksums/speech_commands.txt
+    speaker, _ = os.path.splitext(filename)
+    speaker, _ = os.path.splitext(speaker)
+
+    speaker_id, utterance_number = speaker.split(HASH_DIVIDER)
+    utterance_number = int(utterance_number)
+
+    # Load audio
+    waveform, sample_rate = torchaudio.load(filepath)
+    return waveform, sample_rate, label, speaker_id, utterance_number
+
+
+
[docs]class SPEECHCOMMANDS(Dataset): + """Create a Dataset for Speech Commands. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from, + or the type of the dataset to dowload. + Allowed type values are ``"speech_commands_v0.01"`` and ``"speech_commands_v0.02"`` + (default: ``"speech_commands_v0.02"``) + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"SpeechCommands"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + subset (Optional[str]): + Select a subset of the dataset [None, "training", "validation", "testing"]. None means + the whole dataset. "validation" and "testing" are defined in "validation_list.txt" and + "testing_list.txt", respectively, and "training" is the rest. Details for the files + "validation_list.txt" and "testing_list.txt" are explained in the README of the dataset + and in the introduction of Section 7 of the original paper and its reference 12. The + original paper can be found `here <https://arxiv.org/pdf/1804.03209.pdf>`_. (Default: ``None``) + """ + + def __init__(self, + root: Union[str, Path], + url: str = URL, + folder_in_archive: str = FOLDER_IN_ARCHIVE, + download: bool = False, + subset: Optional[str] = None, + ) -> None: + + assert subset is None or subset in ["training", "validation", "testing"], ( + "When `subset` not None, it must take a value from " + + "{'training', 'validation', 'testing'}." + ) + + if url in [ + "speech_commands_v0.01", + "speech_commands_v0.02", + ]: + base_url = "https://storage.googleapis.com/download.tensorflow.org/data/" + ext_archive = ".tar.gz" + + url = os.path.join(base_url, url + ext_archive) + + # Get string representation of 'root' in case Path object is passed + root = os.fspath(root) + + basename = os.path.basename(url) + archive = os.path.join(root, basename) + + basename = basename.rsplit(".", 2)[0] + folder_in_archive = os.path.join(folder_in_archive, basename) + + self._path = os.path.join(root, folder_in_archive) + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _CHECKSUMS.get(url, None) + download_url(url, root, hash_value=checksum, hash_type="md5") + extract_archive(archive, self._path) + + if subset == "validation": + self._walker = _load_list(self._path, "validation_list.txt") + elif subset == "testing": + self._walker = _load_list(self._path, "testing_list.txt") + elif subset == "training": + excludes = set(_load_list(self._path, "validation_list.txt", "testing_list.txt")) + walker = sorted(str(p) for p in Path(self._path).glob('*/*.wav')) + self._walker = [ + w for w in walker + if HASH_DIVIDER in w + and EXCEPT_FOLDER not in w + and os.path.normpath(w) not in excludes + ] + else: + walker = sorted(str(p) for p in Path(self._path).glob('*/*.wav')) + self._walker = [w for w in walker if HASH_DIVIDER in w and EXCEPT_FOLDER not in w] + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, label, speaker_id, utterance_number)`` + """ + fileid = self._walker[n] + return load_speechcommands_item(fileid, self._path)
+ + def __len__(self) -> int: + return len(self._walker)
+
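A usage sketch for the SPEECHCOMMANDS class above showing the subset argument from the docstring; the ./data root and download=True are assumptions.

    from torchaudio.datasets import SPEECHCOMMANDS

    # subset selects the official split: "training", "validation", "testing", or None for everything.
    train_set = SPEECHCOMMANDS("./data", download=True, subset="training")
    test_set = SPEECHCOMMANDS("./data", download=True, subset="testing")

    waveform, sample_rate, label, speaker_id, utterance_number = train_set[0]
    print(label, speaker_id, utterance_number, waveform.shape)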
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/datasets/tedlium.html b/0.9.0/_modules/torchaudio/datasets/tedlium.html
new file mode 100644
index 0000000000..31a8897869
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/datasets/tedlium.html
@@ -0,0 +1,815 @@
+ torchaudio.datasets.tedlium — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.datasets.tedlium

+import os
+from typing import Tuple, Union
+from pathlib import Path
+
+import torchaudio
+from torch import Tensor
+from torch.utils.data import Dataset
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+
+_RELEASE_CONFIGS = {
+    "release1": {
+        "folder_in_archive": "TEDLIUM_release1",
+        "url": "http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz",
+        "checksum": "30301975fd8c5cac4040c261c0852f57cfa8adbbad2ce78e77e4986957445f27",
+        "data_path": "",
+        "subset": "train",
+        "supported_subsets": ["train", "test", "dev"],
+        "dict": "TEDLIUM.150K.dic",
+    },
+    "release2": {
+        "folder_in_archive": "TEDLIUM_release2",
+        "url": "http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz",
+        "checksum": "93281b5fcaaae5c88671c9d000b443cb3c7ea3499ad12010b3934ca41a7b9c58",
+        "data_path": "",
+        "subset": "train",
+        "supported_subsets": ["train", "test", "dev"],
+        "dict": "TEDLIUM.152k.dic",
+    },
+    "release3": {
+        "folder_in_archive": "TEDLIUM_release-3",
+        "url": "http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz",
+        "checksum": "ad1e454d14d1ad550bc2564c462d87c7a7ec83d4dc2b9210f22ab4973b9eccdb",
+        "data_path": "data/",
+        "subset": None,
+        "supported_subsets": [None],
+        "dict": "TEDLIUM.152k.dic",
+    },
+}
+
+
+
[docs]class TEDLIUM(Dataset): + """ + Create a Dataset for Tedlium. It supports releases 1,2 and 3. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + release (str, optional): Release version. + Allowed values are ``"release1"``, ``"release2"`` or ``"release3"``. + (default: ``"release1"``). + subset (str, optional): The subset of dataset to use. Valid options are ``"train"``, ``"dev"``, + and ``"test"`` for releases 1&2, ``None`` for release3. Defaults to ``"train"`` or ``None``. + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + """ + def __init__( + self, + root: Union[str, Path], + release: str = "release1", + subset: str = None, + download: bool = False, + audio_ext=".sph" + ) -> None: + self._ext_audio = audio_ext + if release in _RELEASE_CONFIGS.keys(): + folder_in_archive = _RELEASE_CONFIGS[release]["folder_in_archive"] + url = _RELEASE_CONFIGS[release]["url"] + subset = subset if subset else _RELEASE_CONFIGS[release]["subset"] + else: + # Raise warning + raise RuntimeError( + "The release {} does not match any of the supported tedlium releases{} ".format( + release, _RELEASE_CONFIGS.keys(), + ) + ) + if subset not in _RELEASE_CONFIGS[release]["supported_subsets"]: + # Raise warning + raise RuntimeError( + "The subset {} does not match any of the supported tedlium subsets{} ".format( + subset, _RELEASE_CONFIGS[release]["supported_subsets"], + ) + ) + + # Get string representation of 'root' in case Path object is passed + root = os.fspath(root) + + basename = os.path.basename(url) + archive = os.path.join(root, basename) + + basename = basename.split(".")[0] + + self._path = os.path.join(root, folder_in_archive, _RELEASE_CONFIGS[release]["data_path"]) + if subset in ["train", "dev", "test"]: + self._path = os.path.join(self._path, subset) + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _RELEASE_CONFIGS[release]["checksum"] + download_url(url, root, hash_value=checksum) + extract_archive(archive) + + # Create list for all samples + self._filelist = [] + stm_path = os.path.join(self._path, "stm") + for file in sorted(os.listdir(stm_path)): + if file.endswith(".stm"): + stm_path = os.path.join(self._path, "stm", file) + with open(stm_path) as f: + l = len(f.readlines()) + file = file.replace(".stm", "") + self._filelist.extend((file, line) for line in range(l)) + # Create dict path for later read + self._dict_path = os.path.join(root, folder_in_archive, _RELEASE_CONFIGS[release]["dict"]) + self._phoneme_dict = None + + def _load_tedlium_item(self, fileid: str, line: int, path: str) -> Tuple[Tensor, int, str, int, int, int]: + """Loads a TEDLIUM dataset sample given a file name and corresponding sentence name. 
+ + Args: + fileid (str): File id to identify both text and audio files corresponding to the sample + line (int): Line identifier for the sample inside the text file + path (str): Dataset root path + + Returns: + tuple: ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)`` + """ + transcript_path = os.path.join(path, "stm", fileid) + with open(transcript_path + ".stm") as f: + transcript = f.readlines()[line] + talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split(" ", 6) + + wave_path = os.path.join(path, "sph", fileid) + waveform, sample_rate = self._load_audio(wave_path + self._ext_audio, start_time=start_time, end_time=end_time) + + return (waveform, sample_rate, transcript, talk_id, speaker_id, identifier) + + def _load_audio(self, path: str, start_time: float, end_time: float, sample_rate: int = 16000) -> [Tensor, int]: + """Default load function used in TEDLIUM dataset, you can overwrite this function to customize functionality + and load individual sentences from a full ted audio talk file. + + Args: + path (str): Path to audio file + start_time (int, optional): Time in seconds where the sample sentence stars + end_time (int, optional): Time in seconds where the sample sentence finishes + + Returns: + [Tensor, int]: Audio tensor representation and sample rate + """ + start_time = int(float(start_time) * sample_rate) + end_time = int(float(end_time) * sample_rate) + + kwargs = {"frame_offset": start_time, "num_frames": end_time - start_time} + + return torchaudio.load(path, **kwargs) + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)`` + """ + fileid, line = self._filelist[n] + return self._load_tedlium_item(fileid, line, self._path)
+ + def __len__(self) -> int: + """TEDLIUM dataset custom function overwritting len default behaviour. + + Returns: + int: TEDLIUM dataset length + """ + return len(self._filelist) + + @property + def phoneme_dict(self): + """dict[str, tuple[str]]: Phonemes. Mapping from word to tuple of phonemes. + Note that some words have empty phonemes. + """ + # Read phoneme dictionary + if not self._phoneme_dict: + self._phoneme_dict = {} + with open(self._dict_path, "r", encoding="utf-8") as f: + for line in f.readlines(): + content = line.strip().split() + self._phoneme_dict[content[0]] = tuple(content[1:]) # content[1:] can be empty list + return self._phoneme_dict.copy()
+
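A usage sketch for the TEDLIUM class above; "release1" with subset="dev" is an illustrative combination from _RELEASE_CONFIGS, and the root path and download=True are assumptions.

    from torchaudio.datasets import TEDLIUM

    dataset = TEDLIUM("./data", release="release1", subset="dev", download=True)
    waveform, sample_rate, transcript, talk_id, speaker_id, identifier = dataset[0]
    print(talk_id, transcript[:60])

    # The release's pronunciation dictionary is read lazily on first access.
    phonemes = dataset.phoneme_dict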
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/datasets/vctk.html b/0.9.0/_modules/torchaudio/datasets/vctk.html
new file mode 100644
index 0000000000..537c45476f
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/datasets/vctk.html
@@ -0,0 +1,892 @@
+ torchaudio.datasets.vctk — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.datasets.vctk

+import os
+import warnings
+from pathlib import Path
+from typing import Tuple, Union
+
+from torch import Tensor
+from torch.utils.data import Dataset
+
+import torchaudio
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+URL = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"
+FOLDER_IN_ARCHIVE = "VCTK-Corpus"
+_CHECKSUMS = {
+    "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip": "8a6ba2946b36fcbef0212cad601f4bfa"
+}
+
+
+def load_vctk_item(fileid: str,
+                   path: str,
+                   ext_audio: str,
+                   ext_txt: str,
+                   folder_audio: str,
+                   folder_txt: str,
+                   downsample: bool = False) -> Tuple[Tensor, int, str, str, str]:
+    speaker_id, utterance_id = fileid.split("_")
+
+    # Read text
+    file_txt = os.path.join(path, folder_txt, speaker_id, fileid + ext_txt)
+    with open(file_txt) as file_text:
+        utterance = file_text.readlines()[0]
+
+    # Read wav
+    file_audio = os.path.join(path, folder_audio, speaker_id, fileid + ext_audio)
+    waveform, sample_rate = torchaudio.load(file_audio)
+    if downsample:
+        # TODO Remove this parameter after deprecation
+        F = torchaudio.functional
+        T = torchaudio.transforms
+        # rate
+        sample = T.Resample(sample_rate, 16000, resampling_method='sinc_interpolation')
+        waveform = sample(waveform)
+        # dither
+        waveform = F.dither(waveform, noise_shaping=True)
+
+    return waveform, sample_rate, utterance, speaker_id, utterance_id
+
+
+
[docs]class VCTK(Dataset): + """Create a Dataset for VCTK. + + Note: + * **This dataset is no longer publicly available.** Please use :py:class:`VCTK_092` + * Directory ``p315`` is ignored because there is no corresponding text files. + For more information about the dataset visit: https://datashare.is.ed.ac.uk/handle/10283/3443 + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): Not used as the dataset is no longer publicly available. + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"VCTK-Corpus"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + Giving ``download=True`` will result in error as the dataset is no longer + publicly available. + downsample (bool, optional): Not used. + """ + + _folder_txt = "txt" + _folder_audio = "wav48" + _ext_txt = ".txt" + _ext_audio = ".wav" + _except_folder = "p315" + + def __init__(self, + root: Union[str, Path], + url: str = URL, + folder_in_archive: str = FOLDER_IN_ARCHIVE, + download: bool = False, + downsample: bool = False) -> None: + + if downsample: + warnings.warn( + "In the next version, transforms will not be part of the dataset. " + "Please use `downsample=False` to enable this behavior now, " + "and suppress this warning." + ) + + self.downsample = downsample + # Get string representation of 'root' in case Path object is passed + root = os.fspath(root) + + archive = os.path.basename(url) + archive = os.path.join(root, archive) + self._path = os.path.join(root, folder_in_archive) + + if download: + raise RuntimeError( + "This Dataset is no longer available. " + "Please use `VCTK_092` class to download the latest version." + ) + + if not os.path.isdir(self._path): + raise RuntimeError( + "Dataset not found. Please use `VCTK_092` class " + "with `download=True` to donwload the latest version." + ) + + walker = sorted(str(p.stem) for p in Path(self._path).glob('**/*' + self._ext_audio)) + walker = filter(lambda w: self._except_folder not in w, walker) + self._walker = list(walker) + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, str]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, utterance, speaker_id, utterance_id)`` + """ + fileid = self._walker[n] + item = load_vctk_item( + fileid, + self._path, + self._ext_audio, + self._ext_txt, + self._folder_audio, + self._folder_txt, + ) + + # TODO Upon deprecation, uncomment line below and remove following code + # return item + + waveform, sample_rate, utterance, speaker_id, utterance_id = item + return waveform, sample_rate, utterance, speaker_id, utterance_id
+ + def __len__(self) -> int: + return len(self._walker)
+ + +SampleType = Tuple[Tensor, int, str, str, str] + + +
[docs]class VCTK_092(Dataset): + """Create VCTK 0.92 Dataset + + Args: + root (str): Root directory where the dataset's top level directory is found. + mic_id (str): Microphone ID. Either ``"mic1"`` or ``"mic2"``. (default: ``"mic2"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + url (str, optional): The URL to download the dataset from. + (default: ``"https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"``) + audio_ext (str, optional): Custom audio extension if dataset is converted to non-default audio format. + + Note: + * All the speeches from speaker ``p315`` will be skipped due to the lack of the corresponding text files. + * All the speeches from ``p280`` will be skipped for ``mic_id="mic2"`` due to the lack of the audio files. + * Some of the speeches from speaker ``p362`` will be skipped due to the lack of the audio files. + * See Also: https://datashare.is.ed.ac.uk/handle/10283/3443 + """ + + def __init__( + self, + root: str, + mic_id: str = "mic2", + download: bool = False, + url: str = URL, + audio_ext=".flac", + ): + if mic_id not in ["mic1", "mic2"]: + raise RuntimeError( + f'`mic_id` has to be either "mic1" or "mic2". Found: {mic_id}' + ) + + archive = os.path.join(root, "VCTK-Corpus-0.92.zip") + + self._path = os.path.join(root, "VCTK-Corpus-0.92") + self._txt_dir = os.path.join(self._path, "txt") + self._audio_dir = os.path.join(self._path, "wav48_silence_trimmed") + self._mic_id = mic_id + self._audio_ext = audio_ext + + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _CHECKSUMS.get(url, None) + download_url(url, root, hash_value=checksum, hash_type="md5") + extract_archive(archive, self._path) + + if not os.path.isdir(self._path): + raise RuntimeError( + "Dataset not found. Please use `download=True` to download it." + ) + + # Extracting speaker IDs from the folder structure + self._speaker_ids = sorted(os.listdir(self._txt_dir)) + self._sample_ids = [] + + """ + Due to some insufficient data complexity in the 0.92 version of this dataset, + we start traversing the audio folder structure in accordance with the text folder. + As some of the audio files are missing of either ``mic_1`` or ``mic_2`` but the + text is present for the same, we first check for the existence of the audio file + before adding it to the ``sample_ids`` list. + + Once the ``audio_ids`` are loaded into memory we can quickly access the list for + different parameters required by the user. 
+ """ + for speaker_id in self._speaker_ids: + if speaker_id == "p280" and mic_id == "mic2": + continue + utterance_dir = os.path.join(self._txt_dir, speaker_id) + for utterance_file in sorted( + f for f in os.listdir(utterance_dir) if f.endswith(".txt") + ): + utterance_id = os.path.splitext(utterance_file)[0] + audio_path_mic = os.path.join( + self._audio_dir, + speaker_id, + f"{utterance_id}_{mic_id}{self._audio_ext}", + ) + if speaker_id == "p362" and not os.path.isfile(audio_path_mic): + continue + self._sample_ids.append(utterance_id.split("_")) + + def _load_text(self, file_path) -> str: + with open(file_path) as file_path: + return file_path.readlines()[0] + + def _load_audio(self, file_path) -> Tuple[Tensor, int]: + return torchaudio.load(file_path) + + def _load_sample(self, speaker_id: str, utterance_id: str, mic_id: str) -> SampleType: + utterance_path = os.path.join( + self._txt_dir, speaker_id, f"{speaker_id}_{utterance_id}.txt" + ) + audio_path = os.path.join( + self._audio_dir, + speaker_id, + f"{speaker_id}_{utterance_id}_{mic_id}{self._audio_ext}", + ) + + # Reading text + utterance = self._load_text(utterance_path) + + # Reading FLAC + waveform, sample_rate = self._load_audio(audio_path) + + return (waveform, sample_rate, utterance, speaker_id, utterance_id) + +
[docs] def __getitem__(self, n: int) -> SampleType: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, utterance, speaker_id, utterance_id)`` + """ + speaker_id, utterance_id = self._sample_ids[n] + return self._load_sample(speaker_id, utterance_id, self._mic_id)
+ + def __len__(self) -> int: + return len(self._sample_ids)
+
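A usage sketch for VCTK_092 above (the downloadable corpus, unlike the deprecated VCTK class); the ./data root, the mic_id choice and download=True are assumptions.

    from torchaudio.datasets import VCTK_092

    dataset = VCTK_092("./data", mic_id="mic2", download=True)
    waveform, sample_rate, utterance, speaker_id, utterance_id = dataset[0]
    print(speaker_id, utterance_id, utterance)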
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/datasets/yesno.html b/0.9.0/_modules/torchaudio/datasets/yesno.html
new file mode 100644
index 0000000000..2b8481ee53
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/datasets/yesno.html
@@ -0,0 +1,710 @@
+ torchaudio.datasets.yesno — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.datasets.yesno

+import os
+from pathlib import Path
+from typing import List, Tuple, Union
+
+from torch import Tensor
+from torch.utils.data import Dataset
+
+import torchaudio
+from torchaudio.datasets.utils import (
+    download_url,
+    extract_archive,
+)
+
+
+_RELEASE_CONFIGS = {
+    "release1": {
+        "folder_in_archive": "waves_yesno",
+        "url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
+        "checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
+    }
+}
+
+
+
[docs]class YESNO(Dataset): + """Create a Dataset for YesNo. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from. + (default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``) + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"waves_yesno"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + """ + + def __init__( + self, + root: Union[str, Path], + url: str = _RELEASE_CONFIGS["release1"]["url"], + folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"], + download: bool = False + ) -> None: + + self._parse_filesystem(root, url, folder_in_archive, download) + + def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None: + root = Path(root) + archive = os.path.basename(url) + archive = root / archive + + self._path = root / folder_in_archive + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _RELEASE_CONFIGS["release1"]["checksum"] + download_url(url, root, hash_value=checksum) + extract_archive(archive) + + if not os.path.isdir(self._path): + raise RuntimeError( + "Dataset not found. Please use `download=True` to download it." + ) + + self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav")) + + def _load_item(self, fileid: str, path: str): + labels = [int(c) for c in fileid.split("_")] + file_audio = os.path.join(path, fileid + ".wav") + waveform, sample_rate = torchaudio.load(file_audio) + return waveform, sample_rate, labels + +
[docs] def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]: + """Load the n-th sample from the dataset. + + Args: + n (int): The index of the sample to be loaded + + Returns: + tuple: ``(waveform, sample_rate, labels)`` + """ + fileid = self._walker[n] + item = self._load_item(fileid, self._path) + return item
+ + def __len__(self) -> int: + return len(self._walker)
+
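A usage sketch for the YESNO class above; the ./data root and download=True are assumptions. labels is the list of 0/1 values that _load_item parses from the underscore-separated file name.

    from torchaudio.datasets import YESNO

    dataset = YESNO("./data", download=True)
    waveform, sample_rate, labels = dataset[0]
    print(labels)   # e.g. [0, 0, 1, 1, 0, 1, 1, 0] for the correspondingly named file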
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/functional/filtering.html b/0.9.0/_modules/torchaudio/functional/filtering.html
new file mode 100644
index 0000000000..5641ed8fcf
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/functional/filtering.html
@@ -0,0 +1,2207 @@
+ torchaudio.functional.filtering — Torchaudio 0.9.0a0+33b2469 documentation

Source code for torchaudio.functional.filtering

+import math
+import warnings
+from typing import Optional
+
+import torch
+from torch import Tensor
+
+import torchaudio._internal.fft
+
+
+def _dB2Linear(x: float) -> float:
+    return math.exp(x * math.log(10) / 20.0)
+
+
+def _generate_wave_table(
+    wave_type: str,
+    data_type: str,
+    table_size: int,
+    min: float,
+    max: float,
+    phase: float,
+    device: torch.device,
+) -> Tensor:
+    r"""A helper function for phaser. Generates a table with given parameters.
+
+    Args:
+        wave_type (str): SINE or TRIANGLE
+        data_type (str): desired data_type ( `INT` or `FLOAT` )
+        table_size (int): desired table size
+        min (float): desired min value
+        max (float): desired max value
+        phase (float): desired phase
+        device (torch.device): Torch device on which table must be generated
+    Returns:
+        Tensor: A 1D tensor with wave table values
+    """
+
+    phase_offset = int(phase / math.pi / 2 * table_size + 0.5)
+
+    t = torch.arange(table_size, device=device, dtype=torch.int32)
+
+    point = (t + phase_offset) % table_size
+
+    d = torch.zeros_like(point, device=device, dtype=torch.float64)
+
+    if wave_type == "SINE":
+        d = (torch.sin(point.to(torch.float64) / table_size * 2 * math.pi) + 1) / 2
+    elif wave_type == "TRIANGLE":
+        d = point.to(torch.float64) * 2 / table_size
+        value = torch.div(4 * point, table_size, rounding_mode='floor')
+        d[value == 0] = d[value == 0] + 0.5
+        d[value == 1] = 1.5 - d[value == 1]
+        d[value == 2] = 1.5 - d[value == 2]
+        d[value == 3] = d[value == 3] - 1.5
+
+    d = d * (max - min) + min
+
+    if data_type == "INT":
+        mask = d < 0
+        d[mask] = d[mask] - 0.5
+        d[~mask] = d[~mask] + 0.5
+        d = d.to(torch.int32)
+    elif data_type == "FLOAT":
+        d = d.to(torch.float32)
+
+    return d
+
+
+
[docs]def allpass_biquad( + waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707 +) -> Tensor: + r"""Design two-pole all-pass filter. Similar to SoX implementation. + + Args: + waveform(torch.Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + + alpha = torch.sin(w0) / 2 / Q + + b0 = 1 - alpha + b1 = -2 * torch.cos(w0) + b2 = 1 + alpha + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2)
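A small sketch of applying the all-pass filter above to a synthetic tone. It assumes the usual public entry point torchaudio.functional, which re-exports the functions defined in this module (importing from torchaudio.functional.filtering works equally).

    import math
    import torch
    import torchaudio.functional as F

    sample_rate = 16000
    t = torch.arange(sample_rate) / sample_rate
    waveform = torch.sin(2 * math.pi * 440.0 * t).unsqueeze(0)   # (1, time) test tone

    # An all-pass section leaves the magnitude response flat and only alters phase.
    shifted = F.allpass_biquad(waveform, sample_rate, central_freq=440.0, Q=0.707)
    print(shifted.shape)   # same shape as the input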
+ + +
[docs]def band_biquad( + waveform: Tensor, + sample_rate: int, + central_freq: float, + Q: float = 0.707, + noise: bool = False, +) -> Tensor: + r"""Design two-pole band filter. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). + noise (bool, optional) : If ``True``, uses the alternate mode for un-pitched audio (e.g. percussion). + If ``False``, uses mode oriented to pitched audio, i.e. voice, singing, + or instrumental music (Default: ``False``). + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + bw_Hz = central_freq / Q + + a0 = 1.0 + a2 = torch.exp(-2 * math.pi * bw_Hz / sample_rate) + a1 = -4 * a2 / (1 + a2) * torch.cos(w0) + + b0 = torch.sqrt(1 - a1 * a1 / (4 * a2)) * (1 - a2) + + if noise: + mult = torch.sqrt(((1 + a2) * (1 + a2) - a1 * a1) * (1 - a2) / (1 + a2)) / b0 + b0 = mult * b0 + + b1 = 0.0 + b2 = 0.0 + + return biquad(waveform, b0, b1, b2, a0, a1, a2)
+ + +
[docs]def bandpass_biquad( + waveform: Tensor, + sample_rate: int, + central_freq: float, + Q: float = 0.707, + const_skirt_gain: bool = False, +) -> Tensor: + r"""Design two-pole band-pass filter. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + const_skirt_gain (bool, optional) : If ``True``, uses a constant skirt gain (peak gain = Q). + If ``False``, uses a constant 0dB peak gain. (Default: ``False``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + + temp = torch.sin(w0) / 2 if const_skirt_gain else alpha + b0 = temp + b1 = 0.0 + b2 = -temp + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2)
+ + +
[docs]def bandreject_biquad( + waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707 +) -> Tensor: + r"""Design two-pole band-reject filter. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + + b0 = 1.0 + b1 = -2 * torch.cos(w0) + b2 = 1.0 + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2)
+ + +
[docs]def bass_biquad( + waveform: Tensor, + sample_rate: int, + gain: float, + central_freq: float = 100, + Q: float = 0.707, +) -> Tensor: + r"""Design a bass tone-control effect. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB. + central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``100``) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + gain = torch.as_tensor(gain, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + A = torch.exp(gain / 40 * math.log(10)) + + temp1 = 2 * torch.sqrt(A) * alpha + temp2 = (A - 1) * torch.cos(w0) + temp3 = (A + 1) * torch.cos(w0) + + b0 = A * ((A + 1) - temp2 + temp1) + b1 = 2 * A * ((A - 1) - temp3) + b2 = A * ((A + 1) - temp2 - temp1) + a0 = (A + 1) + temp2 + temp1 + a1 = -2 * ((A - 1) + temp3) + a2 = (A + 1) + temp2 - temp1 + + return biquad(waveform, b0 / a0, b1 / a0, b2 / a0, a0 / a0, a1 / a0, a2 / a0)
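A sketch of the bass tone control above applied to a loaded file; "speech.wav" is a placeholder path, and a 6 dB boost at the default 100 Hz shelf is an arbitrary choice.

    import torchaudio
    import torchaudio.functional as F

    waveform, sample_rate = torchaudio.load("speech.wav")   # placeholder path
    boosted = F.bass_biquad(waveform, sample_rate, gain=6.0, central_freq=100.0)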
+ + +
[docs]def biquad( + waveform: Tensor, b0: float, b1: float, b2: float, a0: float, a1: float, a2: float +) -> Tensor: + r"""Perform a biquad filter of input tensor. Initial conditions set to 0. + https://en.wikipedia.org/wiki/Digital_biquad_filter + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + b0 (float or torch.Tensor): numerator coefficient of current input, x[n] + b1 (float or torch.Tensor): numerator coefficient of input one time step ago x[n-1] + b2 (float or torch.Tensor): numerator coefficient of input two time steps ago x[n-2] + a0 (float or torch.Tensor): denominator coefficient of current output y[n], typically 1 + a1 (float or torch.Tensor): denominator coefficient of current output y[n-1] + a2 (float or torch.Tensor): denominator coefficient of current output y[n-2] + + Returns: + Tensor: Waveform with dimension of `(..., time)` + """ + + device = waveform.device + dtype = waveform.dtype + + b0 = torch.as_tensor(b0, dtype=dtype, device=device).view(1) + b1 = torch.as_tensor(b1, dtype=dtype, device=device).view(1) + b2 = torch.as_tensor(b2, dtype=dtype, device=device).view(1) + a0 = torch.as_tensor(a0, dtype=dtype, device=device).view(1) + a1 = torch.as_tensor(a1, dtype=dtype, device=device).view(1) + a2 = torch.as_tensor(a2, dtype=dtype, device=device).view(1) + + output_waveform = lfilter( + waveform, + torch.cat([a0, a1, a2]), + torch.cat([b0, b1, b2]), + ) + return output_waveform
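The generic biquad entry point above takes raw coefficients; as a sketch, a 3-tap moving average can be written as a degenerate biquad with a zero-valued feedback path.

    import torch
    import torchaudio.functional as F

    waveform = torch.rand(1, 16000) * 2 - 1   # dummy audio in [-1, 1)

    # y[n] = (x[n] + x[n-1] + x[n-2]) / 3, i.e. b0 = b1 = b2 = 1/3 and a1 = a2 = 0
    smoothed = F.biquad(waveform, b0=1 / 3, b1=1 / 3, b2=1 / 3, a0=1.0, a1=0.0, a2=0.0)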
+ + +
[docs]def contrast(waveform: Tensor, enhancement_amount: float = 75.0) -> Tensor: + r"""Apply contrast effect. Similar to SoX implementation. + Comparable with compression, this effect modifies an audio signal to make it sound louder + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + enhancement_amount (float): controls the amount of the enhancement + Allowed range of values for enhancement_amount : 0-100 + Note that enhancement_amount = 0 still gives a significant contrast enhancement + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + """ + + if not 0 <= enhancement_amount <= 100: + raise ValueError("Allowed range of values for enhancement_amount : 0-100") + + contrast = enhancement_amount / 750.0 + + temp1 = waveform * (math.pi / 2) + temp2 = contrast * torch.sin(temp1 * 4) + output_waveform = torch.sin(temp1 + temp2) + + return output_waveform
+ + +
[docs]def dcshift( + waveform: Tensor, shift: float, limiter_gain: Optional[float] = None +) -> Tensor: + r"""Apply a DC shift to the audio. Similar to SoX implementation. + This can be useful to remove a DC offset + (caused perhaps by a hardware problem in the recording chain) from the audio + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + shift (float): indicates the amount to shift the audio + Allowed range of values for shift : -2.0 to +2.0 + limiter_gain (float): It is used only on peaks to prevent clipping + It should have a value much less than 1 (e.g. 0.05 or 0.02) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + """ + output_waveform = waveform + limiter_threshold = 0.0 + + if limiter_gain is not None: + limiter_threshold = 1.0 - (abs(shift) - limiter_gain) + + if limiter_gain is not None and shift > 0: + mask = waveform > limiter_threshold + temp = ( + (waveform[mask] - limiter_threshold) + * limiter_gain + / (1 - limiter_threshold) + ) + output_waveform[mask] = (temp + limiter_threshold + shift).clamp( + max=limiter_threshold + ) + output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1) + elif limiter_gain is not None and shift < 0: + mask = waveform < -limiter_threshold + temp = ( + (waveform[mask] + limiter_threshold) + * limiter_gain + / (1 - limiter_threshold) + ) + output_waveform[mask] = (temp - limiter_threshold + shift).clamp( + min=-limiter_threshold + ) + output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1) + else: + output_waveform = (waveform + shift).clamp(min=-1, max=1) + + return output_waveform
+ + +
[docs]def deemph_biquad(waveform: Tensor, sample_rate: int) -> Tensor: + r"""Apply ISO 908 CD de-emphasis (shelving) IIR filter. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, Allowed sample rate ``44100`` or ``48000`` + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + + if sample_rate == 44100: + central_freq = 5283 + width_slope = 0.4845 + gain = -9.477 + elif sample_rate == 48000: + central_freq = 5356 + width_slope = 0.479 + gain = -9.62 + else: + raise ValueError("Sample rate must be 44100 (audio-CD) or 48000 (DAT)") + + w0 = 2 * math.pi * central_freq / sample_rate + A = math.exp(gain / 40.0 * math.log(10)) + alpha = math.sin(w0) / 2 * math.sqrt((A + 1 / A) * (1 / width_slope - 1) + 2) + + temp1 = 2 * math.sqrt(A) * alpha + temp2 = (A - 1) * math.cos(w0) + temp3 = (A + 1) * math.cos(w0) + + b0 = A * ((A + 1) + temp2 + temp1) + b1 = -2 * A * ((A - 1) + temp3) + b2 = A * ((A + 1) + temp2 - temp1) + a0 = (A + 1) - temp2 + temp1 + a1 = 2 * ((A - 1) - temp3) + a2 = (A + 1) - temp2 - temp1 + + return biquad(waveform, b0, b1, b2, a0, a1, a2)
+ + +def _add_noise_shaping(dithered_waveform: Tensor, waveform: Tensor) -> Tensor: + r"""Noise shaping is calculated by error: + error[n] = dithered[n] - original[n] + noise_shaped_waveform[n] = dithered[n] + error[n-1] + """ + wf_shape = waveform.size() + waveform = waveform.reshape(-1, wf_shape[-1]) + + dithered_shape = dithered_waveform.size() + dithered_waveform = dithered_waveform.reshape(-1, dithered_shape[-1]) + + error = dithered_waveform - waveform + + # add error[n-1] to dithered_waveform[n], so offset the error by 1 index + zeros = torch.zeros(1, dtype=error.dtype, device=error.device) + for index in range(error.size()[0]): + err = error[index] + error_offset = torch.cat((zeros, err)) + error[index] = error_offset[: waveform.size()[1]] + + noise_shaped = dithered_waveform + error + return noise_shaped.reshape(dithered_shape[:-1] + noise_shaped.shape[-1:]) + + +def _apply_probability_distribution( + waveform: Tensor, density_function: str = "TPDF" +) -> Tensor: + r"""Apply a probability distribution function on a waveform. + + Triangular probability density function (TPDF) dither noise has a + triangular distribution; values in the center of the range have a higher + probability of occurring. + + Rectangular probability density function (RPDF) dither noise has a + uniform distribution; any value in the specified range has the same + probability of occurring. + + Gaussian probability density function (GPDF) has a normal distribution. + The relationship of probabilities of results follows a bell-shaped, + or Gaussian curve, typical of dither generated by analog sources. + Args: + waveform (Tensor): Tensor of audio of dimension (..., time) + density_function (str, optional): The density function of a + continuous random variable (Default: ``"TPDF"``) + Options: Triangular Probability Density Function - `TPDF` + Rectangular Probability Density Function - `RPDF` + Gaussian Probability Density Function - `GPDF` + Returns: + Tensor: waveform dithered with TPDF + """ + + # pack batch + shape = waveform.size() + waveform = waveform.reshape(-1, shape[-1]) + + channel_size = waveform.size()[0] - 1 + time_size = waveform.size()[-1] - 1 + + random_channel = ( + int( + torch.randint( + channel_size, + [ + 1, + ], + ).item() + ) + if channel_size > 0 + else 0 + ) + random_time = ( + int( + torch.randint( + time_size, + [ + 1, + ], + ).item() + ) + if time_size > 0 + else 0 + ) + + number_of_bits = 16 + up_scaling = 2 ** (number_of_bits - 1) - 2 + signal_scaled = waveform * up_scaling + down_scaling = 2 ** (number_of_bits - 1) + + signal_scaled_dis = waveform + if density_function == "RPDF": + RPDF = waveform[random_channel][random_time] - 0.5 + + signal_scaled_dis = signal_scaled + RPDF + elif density_function == "GPDF": + # TODO Replace by distribution code once + # https://github.com/pytorch/pytorch/issues/29843 is resolved + # gaussian = torch.distributions.normal.Normal(torch.mean(waveform, -1), 1).sample() + + num_rand_variables = 6 + + gaussian = waveform[random_channel][random_time] + for ws in num_rand_variables * [time_size]: + rand_chan = int( + torch.randint( + channel_size, + [ + 1, + ], + ).item() + ) + gaussian += waveform[rand_chan][ + int( + torch.randint( + ws, + [ + 1, + ], + ).item() + ) + ] + + signal_scaled_dis = signal_scaled + gaussian + else: + # dtype needed for https://github.com/pytorch/pytorch/issues/32358 + TPDF = torch.bartlett_window( + time_size + 1, dtype=signal_scaled.dtype, device=signal_scaled.device + ) + TPDF = TPDF.repeat((channel_size + 1), 1) + 
signal_scaled_dis = signal_scaled + TPDF + + quantised_signal_scaled = torch.round(signal_scaled_dis) + quantised_signal = quantised_signal_scaled / down_scaling + + # unpack batch + return quantised_signal.reshape(shape[:-1] + quantised_signal.shape[-1:]) + + +
[docs]def dither( + waveform: Tensor, density_function: str = "TPDF", noise_shaping: bool = False +) -> Tensor: + r"""Dither increases the perceived dynamic range of audio stored at a + particular bit-depth by eliminating nonlinear truncation distortion + (i.e. adding minimally perceived noise to mask distortion caused by quantization). + + Args: + waveform (Tensor): Tensor of audio of dimension (..., time) + density_function (str, optional): + The density function of a continuous random variable. One of + ``"TPDF"`` (Triangular Probability Density Function), + ``"RPDF"`` (Rectangular Probability Density Function) or + ``"GPDF"`` (Gaussian Probability Density Function) (Default: ``"TPDF"``). + noise_shaping (bool, optional): a filtering process that shapes the spectral + energy of quantisation error (Default: ``False``) + + Returns: + Tensor: waveform dithered + """ + dithered = _apply_probability_distribution( + waveform, density_function=density_function + ) + + if noise_shaping: + return _add_noise_shaping(dithered, waveform) + else: + return dithered
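A rough sketch of both call patterns, on a synthetic input already scaled to [-1, 1]; shapes are preserved in either case:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(2, 16000) * 2 - 1            # 2 channels, 1 s at 16 kHz
>>> dithered = F.dither(waveform)                      # TPDF, no noise shaping
>>> shaped = F.dither(waveform, density_function="RPDF", noise_shaping=True)
>>> dithered.shape, shaped.shape
(torch.Size([2, 16000]), torch.Size([2, 16000]))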
+ + +
[docs]def equalizer_biquad( + waveform: Tensor, + sample_rate: int, + center_freq: float, + gain: float, + Q: float = 0.707, +) -> Tensor: + r"""Design biquad peaking equalizer filter and perform filtering. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + center_freq (float): filter's central frequency + gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + """ + dtype = waveform.dtype + device = waveform.device + center_freq = torch.as_tensor(center_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + gain = torch.as_tensor(gain, dtype=dtype, device=device) + + w0 = 2 * math.pi * center_freq / sample_rate + A = torch.exp(gain / 40.0 * math.log(10)) + alpha = torch.sin(w0) / 2 / Q + + b0 = 1 + alpha * A + b1 = -2 * torch.cos(w0) + b2 = 1 - alpha * A + a0 = 1 + alpha / A + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha / A + return biquad(waveform, b0, b1, b2, a0, a1, a2)
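A short sketch of boosting a band around 1 kHz with the peaking equalizer above; the gain and Q values are arbitrary illustration choices:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 48000) * 2 - 1
>>> boosted = F.equalizer_biquad(waveform, sample_rate=48000,
...                              center_freq=1000.0, gain=6.0, Q=1.0)
>>> boosted.shape
torch.Size([1, 48000])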
+ + +
[docs]def flanger( + waveform: Tensor, + sample_rate: int, + delay: float = 0.0, + depth: float = 2.0, + regen: float = 0.0, + width: float = 71.0, + speed: float = 0.5, + phase: float = 25.0, + modulation: str = "sinusoidal", + interpolation: str = "linear", +) -> Tensor: + r"""Apply a flanger effect to the audio. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., channel, time)` . + Max 4 channels allowed + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + delay (float): desired delay in milliseconds(ms) + Allowed range of values are 0 to 30 + depth (float): desired delay depth in milliseconds(ms) + Allowed range of values are 0 to 10 + regen (float): desired regen(feedback gain) in dB + Allowed range of values are -95 to 95 + width (float): desired width(delay gain) in dB + Allowed range of values are 0 to 100 + speed (float): modulation speed in Hz + Allowed range of values are 0.1 to 10 + phase (float): percentage phase-shift for multi-channel + Allowed range of values are 0 to 100 + modulation (str): Use either "sinusoidal" or "triangular" modulation. (Default: ``sinusoidal``) + interpolation (str): Use either "linear" or "quadratic" for delay-line interpolation. (Default: ``linear``) + + Returns: + Tensor: Waveform of dimension of `(..., channel, time)` + + Reference: + - http://sox.sourceforge.net/sox.html + + - Scott Lehman, `Effects Explained`_, + + .. _Effects Explained: + https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html + """ + + if modulation not in ("sinusoidal", "triangular"): + raise ValueError("Only 'sinusoidal' or 'triangular' modulation allowed") + + if interpolation not in ("linear", "quadratic"): + raise ValueError("Only 'linear' or 'quadratic' interpolation allowed") + + actual_shape = waveform.shape + device, dtype = waveform.device, waveform.dtype + + if actual_shape[-2] > 4: + raise ValueError("Max 4 channels allowed") + + # convert to 3D (batch, channels, time) + waveform = waveform.view(-1, actual_shape[-2], actual_shape[-1]) + + # Scaling + feedback_gain = regen / 100 + delay_gain = width / 100 + channel_phase = phase / 100 + delay_min = delay / 1000 + delay_depth = depth / 1000 + + n_channels = waveform.shape[-2] + + if modulation == "sinusoidal": + wave_type = "SINE" + else: + wave_type = "TRIANGLE" + + # Balance output: + in_gain = 1.0 / (1 + delay_gain) + delay_gain = delay_gain / (1 + delay_gain) + + # Balance feedback loop: + delay_gain = delay_gain * (1 - abs(feedback_gain)) + + delay_buf_length = int((delay_min + delay_depth) * sample_rate + 0.5) + delay_buf_length = delay_buf_length + 2 + + delay_bufs = torch.zeros( + waveform.shape[0], n_channels, delay_buf_length, dtype=dtype, device=device + ) + delay_last = torch.zeros(waveform.shape[0], n_channels, dtype=dtype, device=device) + + lfo_length = int(sample_rate / speed) + + table_min = math.floor(delay_min * sample_rate + 0.5) + table_max = delay_buf_length - 2.0 + + lfo = _generate_wave_table( + wave_type=wave_type, + data_type="FLOAT", + table_size=lfo_length, + min=float(table_min), + max=float(table_max), + phase=3 * math.pi / 2, + device=device, + ) + + output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device) + + delay_buf_pos = 0 + lfo_pos = 0 + channel_idxs = torch.arange(0, n_channels, device=device) + + for i in range(waveform.shape[-1]): + + delay_buf_pos = (delay_buf_pos + delay_buf_length - 1) % delay_buf_length + + cur_channel_phase = (channel_idxs * 
lfo_length * channel_phase + 0.5).to( + torch.int64 + ) + delay_tensor = lfo[(lfo_pos + cur_channel_phase) % lfo_length] + frac_delay = torch.frac(delay_tensor) + delay_tensor = torch.floor(delay_tensor) + + int_delay = delay_tensor.to(torch.int64) + + temp = waveform[:, :, i] + + delay_bufs[:, :, delay_buf_pos] = temp + delay_last * feedback_gain + + delayed_0 = delay_bufs[ + :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length + ] + + int_delay = int_delay + 1 + + delayed_1 = delay_bufs[ + :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length + ] + + int_delay = int_delay + 1 + + if interpolation == "linear": + delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay + else: + delayed_2 = delay_bufs[ + :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length + ] + + int_delay = int_delay + 1 + + delayed_2 = delayed_2 - delayed_0 + delayed_1 = delayed_1 - delayed_0 + a = delayed_2 * 0.5 - delayed_1 + b = delayed_1 * 2 - delayed_2 * 0.5 + + delayed = delayed_0 + (a * frac_delay + b) * frac_delay + + delay_last = delayed + output_waveform[:, :, i] = waveform[:, :, i] * in_gain + delayed * delay_gain + + lfo_pos = (lfo_pos + 1) % lfo_length + + return output_waveform.clamp(min=-1, max=1).view(actual_shape)
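A usage sketch for the flanger on a short stereo noise buffer; the parameter values are arbitrary but kept within the documented ranges, and at most 4 channels are allowed:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(2, 8000) * 2 - 1             # (channel, time)
>>> out = F.flanger(waveform, sample_rate=8000, delay=0.0, depth=2.0,
...                 regen=0.0, width=71.0, speed=0.5, phase=25.0,
...                 modulation="sinusoidal", interpolation="linear")
>>> out.shape
torch.Size([2, 8000])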
+ + +
[docs]def gain(waveform: Tensor, gain_db: float = 1.0) -> Tensor:
+    r"""Apply amplification or attenuation to the whole waveform.
+
+    Args:
+        waveform (Tensor): Tensor of audio of dimension (..., time).
+        gain_db (float, optional): Gain adjustment in decibels (dB) (Default: ``1.0``).
+
+    Returns:
+        Tensor: The whole waveform amplified by gain_db.
+    """
+    if gain_db == 0:
+        return waveform
+
+    ratio = 10 ** (gain_db / 20)
+
+    return waveform * ratio
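Because the function is a plain rescale by 10 ** (gain_db / 20), a +6 dB gain roughly doubles every sample; a tiny sketch:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.tensor([[0.1, -0.2, 0.3]])
>>> louder = F.gain(waveform, gain_db=6.0)     # scaled by 10 ** (6 / 20) ≈ 1.995
>>> quieter = F.gain(waveform, gain_db=-6.0)   # scaled by ≈ 0.501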
+ + +
[docs]def highpass_biquad( + waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707 +) -> Tensor: + r"""Design biquad highpass filter and perform filtering. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + cutoff_freq (float or torch.Tensor): filter cutoff frequency + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform dimension of `(..., time)` + """ + dtype = waveform.dtype + device = waveform.device + cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * cutoff_freq / sample_rate + alpha = torch.sin(w0) / 2.0 / Q + + b0 = (1 + torch.cos(w0)) / 2 + b1 = -1 - torch.cos(w0) + b2 = b0 + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2)
+ + +def _lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor): + n_order = a_coeffs_flipped.size(0) + for i_sample, o0 in enumerate(input_signal_windows.t()): + windowed_output_signal = padded_output_waveform[ + :, i_sample:i_sample + n_order + ] + o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1) + padded_output_waveform[:, i_sample + n_order - 1] = o0 + + +try: + _lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop +except RuntimeError as err: + assert str(err) == 'No such operator torchaudio::_lfilter_core_loop' + _lfilter_core_cpu_loop = _lfilter_core_generic_loop + + +def _lfilter_core( + waveform: Tensor, + a_coeffs: Tensor, + b_coeffs: Tensor, +) -> Tensor: + + assert a_coeffs.size(0) == b_coeffs.size(0) + assert len(waveform.size()) == 2 + assert waveform.device == a_coeffs.device + assert b_coeffs.device == a_coeffs.device + + n_channel, n_sample = waveform.size() + n_order = a_coeffs.size(0) + assert n_order > 0 + + # Pad the input and create output + + padded_waveform = torch.nn.functional.pad(waveform, [n_order - 1, 0]) + padded_output_waveform = torch.zeros_like(padded_waveform) + + # Set up the coefficients matrix + # Flip coefficients' order + a_coeffs_flipped = a_coeffs.flip(0) + b_coeffs_flipped = b_coeffs.flip(0) + + # calculate windowed_input_signal in parallel using convolution + input_signal_windows = torch.nn.functional.conv1d( + padded_waveform.unsqueeze(1), + b_coeffs_flipped.view(1, 1, -1) + ).squeeze(1) + + input_signal_windows.div_(a_coeffs[0]) + a_coeffs_flipped.div_(a_coeffs[0]) + + if input_signal_windows.device == torch.device('cpu') and\ + a_coeffs_flipped.device == torch.device('cpu') and\ + padded_output_waveform.device == torch.device('cpu'): + _lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform) + else: + _lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform) + + output = padded_output_waveform[:, n_order - 1:] + return output + +try: + _lfilter = torch.ops.torchaudio._lfilter +except RuntimeError as err: + assert str(err) == 'No such operator torchaudio::_lfilter' + _lfilter = _lfilter_core + + +
[docs]def lfilter( + waveform: Tensor, + a_coeffs: Tensor, + b_coeffs: Tensor, + clamp: bool = True, +) -> Tensor: + r"""Perform an IIR filter by evaluating difference equation. + + Note: + To avoid numerical problems, small filter order is preferred. + Using double precision could also minimize numerical precision errors. + + Args: + waveform (Tensor): audio waveform of dimension of ``(..., time)``. Must be normalized to -1 to 1. + a_coeffs (Tensor): denominator coefficients of difference equation of dimension of ``(n_order + 1)``. + Lower delays coefficients are first, e.g. ``[a0, a1, a2, ...]``. + Must be same size as b_coeffs (pad with 0's as necessary). + b_coeffs (Tensor): numerator coefficients of difference equation of dimension of ``(n_order + 1)``. + Lower delays coefficients are first, e.g. ``[b0, b1, b2, ...]``. + Must be same size as a_coeffs (pad with 0's as necessary). + clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``) + + Returns: + Tensor: Waveform with dimension of ``(..., time)``. + """ + # pack batch + shape = waveform.size() + waveform = waveform.reshape(-1, shape[-1]) + + output = _lfilter(waveform, a_coeffs, b_coeffs) + + if clamp: + output = torch.clamp(output, min=-1.0, max=1.0) + + # unpack batch + output = output.reshape(shape[:-1] + output.shape[-1:]) + + return output
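A sketch of lfilter with a hand-written first-order recursive smoother, y[n] = 0.5 * x[n] + 0.5 * y[n-1]; in the convention above that is b = [0.5, 0] and a = [1, -0.5], padded to the same length:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 1000) * 2 - 1
>>> b_coeffs = torch.tensor([0.5, 0.0])        # numerator   [b0, b1]
>>> a_coeffs = torch.tensor([1.0, -0.5])       # denominator [a0, a1]
>>> smoothed = F.lfilter(waveform, a_coeffs=a_coeffs, b_coeffs=b_coeffs)
>>> smoothed.shape
torch.Size([1, 1000])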
+ + +
[docs]def lowpass_biquad( + waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707 +) -> Tensor: + r"""Design biquad lowpass filter and perform filtering. Similar to SoX implementation. + + Args: + waveform (torch.Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + cutoff_freq (float or torch.Tensor): filter cutoff frequency + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + """ + dtype = waveform.dtype + device = waveform.device + cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * cutoff_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + + b0 = (1 - torch.cos(w0)) / 2 + b1 = 1 - torch.cos(w0) + b2 = b0 + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2)
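The high-pass and low-pass biquads above share the same calling convention; a brief sketch with an arbitrary 1 kHz cutoff:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 16000) * 2 - 1
>>> low = F.lowpass_biquad(waveform, sample_rate=16000, cutoff_freq=1000.0)
>>> high = F.highpass_biquad(waveform, sample_rate=16000, cutoff_freq=1000.0)
>>> low.shape, high.shape
(torch.Size([1, 16000]), torch.Size([1, 16000]))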
+ + +def _overdrive_core_loop_generic( + waveform: Tensor, + temp: Tensor, + last_in: Tensor, + last_out: Tensor, + output_waveform: Tensor +): + for i in range(waveform.shape[-1]): + last_out = temp[:, i] - last_in + 0.995 * last_out + last_in = temp[:, i] + output_waveform[:, i] = waveform[:, i] * 0.5 + last_out * 0.75 + + +try: + _overdrive_core_loop_cpu = torch.ops.torchaudio._overdrive_core_loop +except RuntimeError as err: + assert str(err) == 'No such operator torchaudio::_overdrive_core_loop' + _overdrive_core_loop_cpu = _overdrive_core_loop_generic + + +
[docs]def overdrive(waveform: Tensor, gain: float = 20, colour: float = 20) -> Tensor:
+    r"""Apply an overdrive effect to the audio. Similar to SoX implementation.
+    This effect applies a non-linear distortion to the audio signal.
+
+    Args:
+        waveform (Tensor): audio waveform of dimension of `(..., time)`
+        gain (float): desired gain at the boost (or attenuation) in dB
+            Allowed range of values is 0 to 100
+        colour (float): controls the amount of even harmonic content in the over-driven output
+            Allowed range of values is 0 to 100
+
+    Returns:
+        Tensor: Waveform of dimension of `(..., time)`
+
+    Reference:
+        - http://sox.sourceforge.net/sox.html
+    """
+    actual_shape = waveform.shape
+    device, dtype = waveform.device, waveform.dtype
+
+    # convert to 2D (..,time)
+    waveform = waveform.view(-1, actual_shape[-1])
+
+    gain = _dB2Linear(gain)
+    colour = colour / 200
+    last_in = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)
+    last_out = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)
+
+    temp = waveform * gain + colour
+
+    mask1 = temp < -1
+    temp[mask1] = torch.tensor(-2.0 / 3.0, dtype=dtype, device=device)
+    # Wrapping the constant with Tensor is required for Torchscript
+
+    mask2 = temp > 1
+    temp[mask2] = torch.tensor(2.0 / 3.0, dtype=dtype, device=device)
+
+    mask3 = ~mask1 & ~mask2
+    temp[mask3] = temp[mask3] - (temp[mask3] ** 3) * (1.0 / 3)
+
+    output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device)
+
+    # Uses CPU optimized loop function if available for CPU device
+    if device == torch.device('cpu'):
+        _overdrive_core_loop_cpu(waveform, temp, last_in, last_out, output_waveform)
+    else:
+        _overdrive_core_loop_generic(waveform, temp, last_in, last_out, output_waveform)
+
+    return output_waveform.clamp(min=-1, max=1).view(actual_shape)
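A usage sketch with arbitrary gain/colour values taken from the documented 0 to 100 range:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 8000) * 2 - 1
>>> driven = F.overdrive(waveform, gain=30.0, colour=40.0)
>>> driven.shape
torch.Size([1, 8000])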
+ + +
[docs]def phaser( + waveform: Tensor, + sample_rate: int, + gain_in: float = 0.4, + gain_out: float = 0.74, + delay_ms: float = 3.0, + decay: float = 0.4, + mod_speed: float = 0.5, + sinusoidal: bool = True, +) -> Tensor: + r"""Apply a phasing effect to the audio. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + gain_in (float): desired input gain at the boost (or attenuation) in dB + Allowed range of values are 0 to 1 + gain_out (float): desired output gain at the boost (or attenuation) in dB + Allowed range of values are 0 to 1e9 + delay_ms (float): desired delay in milliseconds + Allowed range of values are 0 to 5.0 + decay (float): desired decay relative to gain-in + Allowed range of values are 0 to 0.99 + mod_speed (float): modulation speed in Hz + Allowed range of values are 0.1 to 2 + sinusoidal (bool): If ``True``, uses sinusoidal modulation (preferable for multiple instruments) + If ``False``, uses triangular modulation (gives single instruments a sharper phasing effect) + (Default: ``True``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - Scott Lehman, `Effects Explained`_. + + .. _Effects Explained: + https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html + """ + actual_shape = waveform.shape + device, dtype = waveform.device, waveform.dtype + + # convert to 2D (channels,time) + waveform = waveform.view(-1, actual_shape[-1]) + + delay_buf_len = int((delay_ms * 0.001 * sample_rate) + 0.5) + delay_buf = torch.zeros( + waveform.shape[0], delay_buf_len, dtype=dtype, device=device + ) + + mod_buf_len = int(sample_rate / mod_speed + 0.5) + + if sinusoidal: + wave_type = "SINE" + else: + wave_type = "TRIANGLE" + + mod_buf = _generate_wave_table( + wave_type=wave_type, + data_type="INT", + table_size=mod_buf_len, + min=1.0, + max=float(delay_buf_len), + phase=math.pi / 2, + device=device, + ) + + delay_pos = 0 + mod_pos = 0 + + output_waveform_pre_gain_list = [] + waveform = waveform * gain_in + delay_buf = delay_buf * decay + waveform_list = [waveform[:, i] for i in range(waveform.size(1))] + delay_buf_list = [delay_buf[:, i] for i in range(delay_buf.size(1))] + mod_buf_list = [mod_buf[i] for i in range(mod_buf.size(0))] + + for i in range(waveform.shape[-1]): + idx = int((delay_pos + mod_buf_list[mod_pos]) % delay_buf_len) + mod_pos = (mod_pos + 1) % mod_buf_len + delay_pos = (delay_pos + 1) % delay_buf_len + temp = (waveform_list[i]) + (delay_buf_list[idx]) + delay_buf_list[delay_pos] = temp * decay + output_waveform_pre_gain_list.append(temp) + + output_waveform = torch.stack(output_waveform_pre_gain_list, dim=1).to( + dtype=dtype, device=device + ) + output_waveform.mul_(gain_out) + + return output_waveform.clamp(min=-1, max=1).view(actual_shape)
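A usage sketch for the phaser with the default parameters made explicit (values are within the documented ranges; the input is synthetic noise):

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 8000) * 2 - 1
>>> phased = F.phaser(waveform, sample_rate=8000, gain_in=0.4, gain_out=0.74,
...                   delay_ms=3.0, decay=0.4, mod_speed=0.5, sinusoidal=True)
>>> phased.shape
torch.Size([1, 8000])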
+ + +
[docs]def riaa_biquad(waveform: Tensor, sample_rate: int) -> Tensor: + r"""Apply RIAA vinyl playback equalization. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz). + Allowed sample rates in Hz : ``44100``,``48000``,``88200``,``96000`` + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + + if sample_rate == 44100: + zeros = [-0.2014898, 0.9233820] + poles = [0.7083149, 0.9924091] + + elif sample_rate == 48000: + zeros = [-0.1766069, 0.9321590] + poles = [0.7396325, 0.9931330] + + elif sample_rate == 88200: + zeros = [-0.1168735, 0.9648312] + poles = [0.8590646, 0.9964002] + + elif sample_rate == 96000: + zeros = [-0.1141486, 0.9676817] + poles = [0.8699137, 0.9966946] + + else: + raise ValueError("Sample rate must be 44.1k, 48k, 88.2k, or 96k") + + # polynomial coefficients with roots zeros[0] and zeros[1] + b0 = 1.0 + b1 = -(zeros[0] + zeros[1]) + b2 = zeros[0] * zeros[1] + + # polynomial coefficients with roots poles[0] and poles[1] + a0 = 1.0 + a1 = -(poles[0] + poles[1]) + a2 = poles[0] * poles[1] + + # Normalize to 0dB at 1kHz + y = 2 * math.pi * 1000 / sample_rate + b_re = b0 + b1 * math.cos(-y) + b2 * math.cos(-2 * y) + a_re = a0 + a1 * math.cos(-y) + a2 * math.cos(-2 * y) + b_im = b1 * math.sin(-y) + b2 * math.sin(-2 * y) + a_im = a1 * math.sin(-y) + a2 * math.sin(-2 * y) + g = 1 / math.sqrt((b_re ** 2 + b_im ** 2) / (a_re ** 2 + a_im ** 2)) + + b0 *= g + b1 *= g + b2 *= g + + return biquad(waveform, b0, b1, b2, a0, a1, a2)
+ + +
[docs]def treble_biquad( + waveform: Tensor, + sample_rate: int, + gain: float, + central_freq: float = 3000, + Q: float = 0.707, +) -> Tensor: + r"""Design a treble tone-control effect. Similar to SoX implementation. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB. + central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``3000``) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + gain = torch.as_tensor(gain, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + A = torch.exp(gain / 40 * math.log(10)) + + temp1 = 2 * torch.sqrt(A) * alpha + temp2 = (A - 1) * torch.cos(w0) + temp3 = (A + 1) * torch.cos(w0) + + b0 = A * ((A + 1) + temp2 + temp1) + b1 = -2 * A * ((A - 1) + temp3) + b2 = A * ((A + 1) + temp2 - temp1) + a0 = (A + 1) - temp2 + temp1 + a1 = 2 * ((A - 1) - temp3) + a2 = (A + 1) - temp2 - temp1 + + return biquad(waveform, b0, b1, b2, a0, a1, a2)
+ + +def _measure( + measure_len_ws: int, + samples: Tensor, + spectrum: Tensor, + noise_spectrum: Tensor, + spectrum_window: Tensor, + spectrum_start: int, + spectrum_end: int, + cepstrum_window: Tensor, + cepstrum_start: int, + cepstrum_end: int, + noise_reduction_amount: float, + measure_smooth_time_mult: float, + noise_up_time_mult: float, + noise_down_time_mult: float, + index_ns: int, + boot_count: int, +) -> float: + + assert spectrum.size()[-1] == noise_spectrum.size()[-1] + + samplesLen_ns = samples.size()[-1] + dft_len_ws = spectrum.size()[-1] + + dftBuf = torch.zeros(dft_len_ws) + + _index_ns = torch.tensor( + [index_ns] + [(index_ns + i) % samplesLen_ns for i in range(1, measure_len_ws)] + ) + dftBuf[:measure_len_ws] = samples[_index_ns] * spectrum_window[:measure_len_ws] + + # memset(c->dftBuf + i, 0, (p->dft_len_ws - i) * sizeof(*c->dftBuf)); + dftBuf[measure_len_ws:dft_len_ws].zero_() + + # lsx_safe_rdft((int)p->dft_len_ws, 1, c->dftBuf); + _dftBuf = torchaudio._internal.fft.rfft(dftBuf) + + # memset(c->dftBuf, 0, p->spectrum_start * sizeof(*c->dftBuf)); + _dftBuf[:spectrum_start].zero_() + + mult: float = ( + boot_count / (1.0 + boot_count) if boot_count >= 0 else measure_smooth_time_mult + ) + + _d = _dftBuf[spectrum_start:spectrum_end].abs() + spectrum[spectrum_start:spectrum_end].mul_(mult).add_(_d * (1 - mult)) + _d = spectrum[spectrum_start:spectrum_end] ** 2 + + _zeros = torch.zeros(spectrum_end - spectrum_start) + _mult = ( + _zeros + if boot_count >= 0 + else torch.where( + _d > noise_spectrum[spectrum_start:spectrum_end], + torch.tensor(noise_up_time_mult), # if + torch.tensor(noise_down_time_mult), # else + ) + ) + + noise_spectrum[spectrum_start:spectrum_end].mul_(_mult).add_(_d * (1 - _mult)) + _d = torch.sqrt( + torch.max( + _zeros, + _d - noise_reduction_amount * noise_spectrum[spectrum_start:spectrum_end], + ) + ) + + _cepstrum_Buf: Tensor = torch.zeros(dft_len_ws >> 1) + _cepstrum_Buf[spectrum_start:spectrum_end] = _d * cepstrum_window + _cepstrum_Buf[spectrum_end:dft_len_ws >> 1].zero_() + + # lsx_safe_rdft((int)p->dft_len_ws >> 1, 1, c->dftBuf); + _cepstrum_Buf = torchaudio._internal.fft.rfft(_cepstrum_Buf) + + result: float = float( + torch.sum(_cepstrum_Buf[cepstrum_start:cepstrum_end].abs().pow(2)) + ) + result = ( + math.log(result / (cepstrum_end - cepstrum_start)) if result > 0 else -math.inf + ) + return max(0, 21 + result) + + +
[docs]def vad(
+    waveform: Tensor,
+    sample_rate: int,
+    trigger_level: float = 7.0,
+    trigger_time: float = 0.25,
+    search_time: float = 1.0,
+    allowed_gap: float = 0.25,
+    pre_trigger_time: float = 0.0,
+    # Fine-tuning parameters
+    boot_time: float = 0.35,
+    noise_up_time: float = 0.1,
+    noise_down_time: float = 0.01,
+    noise_reduction_amount: float = 1.35,
+    measure_freq: float = 20.0,
+    measure_duration: Optional[float] = None,
+    measure_smooth_time: float = 0.4,
+    hp_filter_freq: float = 50.0,
+    lp_filter_freq: float = 6000.0,
+    hp_lifter_freq: float = 150.0,
+    lp_lifter_freq: float = 2000.0,
+) -> Tensor:
+    r"""Voice Activity Detector. Similar to SoX implementation.
+    Attempts to trim silence and quiet background sounds from the ends of recordings of speech.
+    The algorithm currently uses a simple cepstral power measurement to detect voice,
+    so may be fooled by other things, especially music.
+
+    The effect can trim only from the front of the audio,
+    so in order to trim from the back, the reverse effect must also be used.
+
+    Args:
+        waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)`
+            Tensor of shape `(channels, time)` is treated as a multi-channel recording
+            of the same event and the resulting output will be trimmed to the earliest
+            voice activity in any channel.
+        sample_rate (int): Sample rate of audio signal.
+        trigger_level (float, optional): The measurement level used to trigger activity detection.
+            This may need to be changed depending on the noise level, signal level,
+            and other characteristics of the input audio. (Default: 7.0)
+        trigger_time (float, optional): The time constant (in seconds)
+            used to help ignore short bursts of sound. (Default: 0.25)
+        search_time (float, optional): The amount of audio (in seconds)
+            to search for quieter/shorter bursts of audio to include prior
+            to the detected trigger point. (Default: 1.0)
+        allowed_gap (float, optional): The allowed gap (in seconds) between
+            quieter/shorter bursts of audio to include prior
+            to the detected trigger point. (Default: 0.25)
+        pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve
+            before the trigger point and any found quieter/shorter bursts. (Default: 0.0)
+        boot_time (float, optional) The algorithm (internally) uses adaptive noise
+            estimation/reduction in order to detect the start of the wanted audio.
+            This option sets the time for the initial noise estimate. (Default: 0.35)
+        noise_up_time (float, optional) Time constant used by the adaptive noise estimator
+            for when the noise level is increasing. (Default: 0.1)
+        noise_down_time (float, optional) Time constant used by the adaptive noise estimator
+            for when the noise level is decreasing. (Default: 0.01)
+        noise_reduction_amount (float, optional) Amount of noise reduction to use in
+            the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)
+        measure_freq (float, optional) Frequency of the algorithm’s
+            processing/measurements. (Default: 20.0)
+        measure_duration (float, optional) Measurement duration.
+            (Default: Twice the measurement period; i.e. with overlap.)
+        measure_smooth_time (float, optional) Time constant used to smooth
+            spectral measurements. (Default: 0.4)
+        hp_filter_freq (float, optional) "Brick-wall" frequency of high-pass filter applied
+            at the input to the detector algorithm. (Default: 50.0)
+        lp_filter_freq (float, optional) "Brick-wall" frequency of low-pass filter applied
+            at the input to the detector algorithm. 
(Default: 6000.0) + hp_lifter_freq (float, optional) "Brick-wall" frequency of high-pass lifter used + in the detector algorithm. (Default: 150.0) + lp_lifter_freq (float, optional) "Brick-wall" frequency of low-pass lifter used + in the detector algorithm. (Default: 2000.0) + + Returns: + Tensor: Tensor of audio of dimension (..., time). + + Reference: + - http://sox.sourceforge.net/sox.html + """ + + if waveform.ndim > 2: + warnings.warn( + "Expected input tensor dimension of 1 for single channel" + f" or 2 for multi-channel. Got {waveform.ndim} instead. " + "Batch semantics is not supported. " + "Please refer to https://github.com/pytorch/audio/issues/1348" + " and https://github.com/pytorch/audio/issues/1468." + ) + + measure_duration: float = ( + 2.0 / measure_freq if measure_duration is None else measure_duration + ) + + measure_len_ws = int(sample_rate * measure_duration + 0.5) + measure_len_ns = measure_len_ws + # for (dft_len_ws = 16; dft_len_ws < measure_len_ws; dft_len_ws <<= 1); + dft_len_ws = 16 + while dft_len_ws < measure_len_ws: + dft_len_ws *= 2 + + measure_period_ns = int(sample_rate / measure_freq + 0.5) + measures_len = math.ceil(search_time * measure_freq) + search_pre_trigger_len_ns = measures_len * measure_period_ns + gap_len = int(allowed_gap * measure_freq + 0.5) + + fixed_pre_trigger_len_ns = int(pre_trigger_time * sample_rate + 0.5) + samplesLen_ns = ( + fixed_pre_trigger_len_ns + search_pre_trigger_len_ns + measure_len_ns + ) + + spectrum_window = torch.zeros(measure_len_ws) + for i in range(measure_len_ws): + # sox.h:741 define SOX_SAMPLE_MIN (sox_sample_t)SOX_INT_MIN(32) + spectrum_window[i] = 2.0 / math.sqrt(float(measure_len_ws)) + # lsx_apply_hann(spectrum_window, (int)measure_len_ws); + spectrum_window *= torch.hann_window(measure_len_ws, dtype=torch.float) + + spectrum_start: int = int(hp_filter_freq / sample_rate * dft_len_ws + 0.5) + spectrum_start: int = max(spectrum_start, 1) + spectrum_end: int = int(lp_filter_freq / sample_rate * dft_len_ws + 0.5) + spectrum_end: int = min(spectrum_end, dft_len_ws // 2) + + cepstrum_window = torch.zeros(spectrum_end - spectrum_start) + for i in range(spectrum_end - spectrum_start): + cepstrum_window[i] = 2.0 / math.sqrt(float(spectrum_end) - spectrum_start) + # lsx_apply_hann(cepstrum_window,(int)(spectrum_end - spectrum_start)); + cepstrum_window *= torch.hann_window( + spectrum_end - spectrum_start, dtype=torch.float + ) + + cepstrum_start = math.ceil(sample_rate * 0.5 / lp_lifter_freq) + cepstrum_end = math.floor(sample_rate * 0.5 / hp_lifter_freq) + cepstrum_end = min(cepstrum_end, dft_len_ws // 4) + + assert cepstrum_end > cepstrum_start + + noise_up_time_mult = math.exp(-1.0 / (noise_up_time * measure_freq)) + noise_down_time_mult = math.exp(-1.0 / (noise_down_time * measure_freq)) + measure_smooth_time_mult = math.exp(-1.0 / (measure_smooth_time * measure_freq)) + trigger_meas_time_mult = math.exp(-1.0 / (trigger_time * measure_freq)) + + boot_count_max = int(boot_time * measure_freq - 0.5) + measure_timer_ns = measure_len_ns + boot_count = measures_index = flushedLen_ns = samplesIndex_ns = 0 + + # pack batch + shape = waveform.size() + waveform = waveform.view(-1, shape[-1]) + + n_channels, ilen = waveform.size() + + mean_meas = torch.zeros(n_channels) + samples = torch.zeros(n_channels, samplesLen_ns) + spectrum = torch.zeros(n_channels, dft_len_ws) + noise_spectrum = torch.zeros(n_channels, dft_len_ws) + measures = torch.zeros(n_channels, measures_len) + + has_triggered: bool = False + 
num_measures_to_flush: int = 0 + pos: int = 0 + + while pos < ilen and not has_triggered: + measure_timer_ns -= 1 + for i in range(n_channels): + samples[i, samplesIndex_ns] = waveform[i, pos] + # if (!p->measure_timer_ns) { + if measure_timer_ns == 0: + index_ns: int = ( + samplesIndex_ns + samplesLen_ns - measure_len_ns + ) % samplesLen_ns + meas: float = _measure( + measure_len_ws=measure_len_ws, + samples=samples[i], + spectrum=spectrum[i], + noise_spectrum=noise_spectrum[i], + spectrum_window=spectrum_window, + spectrum_start=spectrum_start, + spectrum_end=spectrum_end, + cepstrum_window=cepstrum_window, + cepstrum_start=cepstrum_start, + cepstrum_end=cepstrum_end, + noise_reduction_amount=noise_reduction_amount, + measure_smooth_time_mult=measure_smooth_time_mult, + noise_up_time_mult=noise_up_time_mult, + noise_down_time_mult=noise_down_time_mult, + index_ns=index_ns, + boot_count=boot_count, + ) + measures[i, measures_index] = meas + mean_meas[i] = mean_meas[i] * trigger_meas_time_mult + meas * ( + 1.0 - trigger_meas_time_mult + ) + + has_triggered = has_triggered or (mean_meas[i] >= trigger_level) + if has_triggered: + n: int = measures_len + k: int = measures_index + jTrigger: int = n + jZero: int = n + j: int = 0 + + for j in range(n): + if (measures[i, k] >= trigger_level) and ( + j <= jTrigger + gap_len + ): + jZero = jTrigger = j + elif (measures[i, k] == 0) and (jTrigger >= jZero): + jZero = j + k = (k + n - 1) % n + j = min(j, jZero) + # num_measures_to_flush = range_limit(j, num_measures_to_flush, n); + num_measures_to_flush = min(max(num_measures_to_flush, j), n) + # end if has_triggered + # end if (measure_timer_ns == 0): + # end for + samplesIndex_ns += 1 + pos += 1 + # end while + if samplesIndex_ns == samplesLen_ns: + samplesIndex_ns = 0 + if measure_timer_ns == 0: + measure_timer_ns = measure_period_ns + measures_index += 1 + measures_index = measures_index % measures_len + if boot_count >= 0: + boot_count = -1 if boot_count == boot_count_max else boot_count + 1 + + if has_triggered: + flushedLen_ns = (measures_len - num_measures_to_flush) * measure_period_ns + samplesIndex_ns = (samplesIndex_ns + flushedLen_ns) % samplesLen_ns + + res = waveform[:, pos - samplesLen_ns + flushedLen_ns:] + # unpack batch + return res.view(shape[:-1] + res.shape[-1:])
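A usage sketch. Since the detector only trims leading silence, one way to realize the "reverse effect" mentioned in the docstring is to flip the time axis, run vad again, and flip back; the file name below is a placeholder:

>>> import torch
>>> import torchaudio
>>> import torchaudio.functional as F
>>> waveform, sample_rate = torchaudio.load("speech.wav")   # hypothetical input file
>>> trimmed_front = F.vad(waveform, sample_rate)
>>> trimmed_both = torch.flip(
...     F.vad(torch.flip(trimmed_front, [-1]), sample_rate), [-1])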
\ No newline at end of file
diff --git a/0.9.0/_modules/torchaudio/functional/functional.html b/0.9.0/_modules/torchaudio/functional/functional.html
new file mode 100644
index 0000000000..efe5c7a3bc
--- /dev/null
+++ b/0.9.0/_modules/torchaudio/functional/functional.html
@@ -0,0 +1,2063 @@
Source code for torchaudio.functional.functional

+# -*- coding: utf-8 -*-
+
+import io
+import math
+import warnings
+from typing import Optional, Tuple
+
+import torch
+from torch import Tensor
+from torchaudio._internal import module_utils as _mod_utils
+import torchaudio
+
+__all__ = [
+    "spectrogram",
+    "griffinlim",
+    "amplitude_to_DB",
+    "DB_to_amplitude",
+    "compute_deltas",
+    "compute_kaldi_pitch",
+    "create_fb_matrix",
+    "create_dct",
+    "detect_pitch_frequency",
+    "mu_law_encoding",
+    "mu_law_decoding",
+    "complex_norm",
+    "angle",
+    "magphase",
+    "phase_vocoder",
+    'mask_along_axis',
+    'mask_along_axis_iid',
+    'sliding_window_cmn',
+    "spectral_centroid",
+    "apply_codec",
+    "resample",
+]
+
+
+
[docs]def spectrogram( + waveform: Tensor, + pad: int, + window: Tensor, + n_fft: int, + hop_length: int, + win_length: int, + power: Optional[float], + normalized: bool, + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, + return_complex: bool = False, +) -> Tensor: + r"""Create a spectrogram or a batch of spectrograms from a raw audio signal. + The spectrogram can be either magnitude-only or complex. + + Args: + waveform (Tensor): Tensor of audio of dimension (..., time) + pad (int): Two sided padding of signal + window (Tensor): Window tensor that is applied/multiplied to each frame/window + n_fft (int): Size of FFT + hop_length (int): Length of hop between STFT windows + win_length (int): Window size + power (float or None): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. + If None, then the complex spectrum is returned instead. + normalized (bool): Whether to normalize by magnitude after stft + center (bool, optional): whether to pad :attr:`waveform` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + Default: ``True`` + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. Default: ``"reflect"`` + onesided (bool, optional): controls whether to return half of results to + avoid redundancy. Default: ``True`` + return_complex (bool, optional): + Indicates whether the resulting complex-valued Tensor should be represented with + native complex dtype, such as `torch.cfloat` and `torch.cdouble`, or real dtype + mimicking complex value with an extra dimension for real and imaginary parts. + This argument is only effective when ``power=None``. + See also ``torch.view_as_real``. + + Returns: + Tensor: Dimension (..., freq, time), freq is + ``n_fft // 2 + 1`` and ``n_fft`` is the number of + Fourier bins, and time is the number of window hops (n_frame). + """ + if power is None and not return_complex: + warnings.warn( + "The use of pseudo complex type in spectrogram is now deprecated." + "Please migrate to native complex type by providing `return_complex=True`. " + "Please refer to https://github.com/pytorch/audio/issues/1337 " + "for more details about torchaudio's plan to migrate to native complex type." + ) + + if pad > 0: + # TODO add "with torch.no_grad():" back when JIT supports it + waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant") + + # pack batch + shape = waveform.size() + waveform = waveform.reshape(-1, shape[-1]) + + # default values are consistent with librosa.core.spectrum._spectrogram + spec_f = torch.stft( + input=waveform, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + center=center, + pad_mode=pad_mode, + normalized=False, + onesided=onesided, + return_complex=True, + ) + + # unpack batch + spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) + + if normalized: + spec_f /= window.pow(2.).sum().sqrt() + if power is not None: + if power == 1.0: + return spec_f.abs() + return spec_f.abs().pow(power) + if not return_complex: + return torch.view_as_real(spec_f) + return spec_f
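A minimal sketch of calling the functional directly; in practice torchaudio.transforms.Spectrogram wraps this and fills in the window and padding arguments:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 16000) * 2 - 1
>>> n_fft, hop_length, win_length = 400, 200, 400
>>> spec = F.spectrogram(waveform, pad=0, window=torch.hann_window(win_length),
...                      n_fft=n_fft, hop_length=hop_length, win_length=win_length,
...                      power=2.0, normalized=False)
>>> spec.shape              # (channel, n_fft // 2 + 1, n_frames)
torch.Size([1, 201, 81])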
+ + +def _get_complex_dtype(real_dtype: torch.dtype): + if real_dtype == torch.double: + return torch.cdouble + if real_dtype == torch.float: + return torch.cfloat + if real_dtype == torch.half: + return torch.complex32 + raise ValueError(f'Unexpected dtype {real_dtype}') + + +
[docs]def griffinlim( + specgram: Tensor, + window: Tensor, + n_fft: int, + hop_length: int, + win_length: int, + power: float, + n_iter: int, + momentum: float, + length: Optional[int], + rand_init: bool +) -> Tensor: + r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation. + + Implementation ported from + *librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`] + and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`]. + + Args: + specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames) + where freq is ``n_fft // 2 + 1``. + window (Tensor): Window tensor that is applied/multiplied to each frame/window + n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins + hop_length (int): Length of hop between STFT windows. ( + Default: ``win_length // 2``) + win_length (int): Window size. (Default: ``n_fft``) + power (float): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. + n_iter (int): Number of iteration for phase recovery process. + momentum (float): The momentum parameter for fast Griffin-Lim. + Setting this to 0 recovers the original Griffin-Lim method. + Values near 1 can lead to faster convergence, but above 1 may not converge. + length (int or None): Array length of the expected output. + rand_init (bool): Initializes phase randomly if True, to zero otherwise. + + Returns: + torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given. + """ + assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum) + assert momentum >= 0, 'momentum={} < 0'.format(momentum) + + # pack batch + shape = specgram.size() + specgram = specgram.reshape([-1] + list(shape[-2:])) + + specgram = specgram.pow(1 / power) + + # initialize the phase + if rand_init: + angles = torch.rand( + specgram.size(), + dtype=_get_complex_dtype(specgram.dtype), device=specgram.device) + else: + angles = torch.full( + specgram.size(), 1, + dtype=_get_complex_dtype(specgram.dtype), device=specgram.device) + + # And initialize the previous iterate to 0 + tprev = torch.tensor(0., dtype=specgram.dtype, device=specgram.device) + for _ in range(n_iter): + # Invert with our current estimate of the phases + inverse = torch.istft(specgram * angles, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + length=length) + + # Rebuild the spectrogram + rebuilt = torch.stft( + input=inverse, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True, + ) + + # Update our phase estimates + angles = rebuilt + if momentum: + angles = angles - tprev.mul_(momentum / (1 + momentum)) + angles = angles.div(angles.abs().add(1e-16)) + + # Store the previous iterate + tprev = rebuilt + + # Return the final phase estimates + waveform = torch.istft(specgram * angles, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + length=length) + + # unpack batch + waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) + + return waveform
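A rough round-trip sketch: compute a power spectrogram, then recover a time-domain signal from its magnitudes (the STFT parameters must match between the two calls; the reconstruction is approximate by nature):

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 16000) * 2 - 1
>>> n_fft, hop_length, win_length = 400, 200, 400
>>> window = torch.hann_window(win_length)
>>> spec = F.spectrogram(waveform, pad=0, window=window, n_fft=n_fft,
...                      hop_length=hop_length, win_length=win_length,
...                      power=2.0, normalized=False)
>>> recovered = F.griffinlim(spec, window=window, n_fft=n_fft,
...                          hop_length=hop_length, win_length=win_length,
...                          power=2.0, n_iter=32, momentum=0.99,
...                          length=16000, rand_init=True)
>>> recovered.shape
torch.Size([1, 16000])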
+ + +
[docs]def amplitude_to_DB( + x: Tensor, + multiplier: float, + amin: float, + db_multiplier: float, + top_db: Optional[float] = None +) -> Tensor: + r"""Turn a spectrogram from the power/amplitude scale to the decibel scale. + + The output of each tensor in a batch depends on the maximum value of that tensor, + and so may return different values for an audio clip split into snippets vs. a full clip. + + Args: + + x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take + the form `(..., freq, time)`. Batched inputs should include a channel dimension and + have the form `(batch, channel, freq, time)`. + multiplier (float): Use 10. for power and 20. for amplitude + amin (float): Number to clamp ``x`` + db_multiplier (float): Log10(max(reference value and amin)) + top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number + is 80. (Default: ``None``) + + Returns: + Tensor: Output tensor in decibel scale + """ + x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) + x_db -= multiplier * db_multiplier + + if top_db is not None: + # Expand batch + shape = x_db.size() + packed_channels = shape[-3] if x_db.dim() > 2 else 1 + x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) + + x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1)) + + # Repack batch + x_db = x_db.reshape(shape) + + return x_db
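For a power spectrogram, the conventional arguments are multiplier=10.0, amin=1e-10 and db_multiplier = log10(max(amin, ref)); a sketch with ref = 1.0:

>>> import math
>>> import torch
>>> import torchaudio.functional as F
>>> power_spec = torch.rand(1, 201, 100)        # (channel, freq, time)
>>> amin, ref = 1e-10, 1.0
>>> spec_db = F.amplitude_to_DB(power_spec, multiplier=10.0, amin=amin,
...                             db_multiplier=math.log10(max(amin, ref)),
...                             top_db=80.0)
>>> spec_db.shape
torch.Size([1, 201, 100])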
+ + +
[docs]def DB_to_amplitude( + x: Tensor, + ref: float, + power: float +) -> Tensor: + r"""Turn a tensor from the decibel scale to the power/amplitude scale. + + Args: + x (Tensor): Input tensor before being converted to power/amplitude scale. + ref (float): Reference which the output will be scaled by. + power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude. + + Returns: + Tensor: Output tensor in power/amplitude scale. + """ + return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
+ + +def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float: + r"""Convert Hz to Mels. + + Args: + freqs (float): Frequencies in Hz + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + + Returns: + mels (float): Frequency in Mels + """ + + if mel_scale not in ['slaney', 'htk']: + raise ValueError('mel_scale should be one of "htk" or "slaney".') + + if mel_scale == "htk": + return 2595.0 * math.log10(1.0 + (freq / 700.0)) + + # Fill in the linear part + f_min = 0.0 + f_sp = 200.0 / 3 + + mels = (freq - f_min) / f_sp + + # Fill in the log-scale part + min_log_hz = 1000.0 + min_log_mel = (min_log_hz - f_min) / f_sp + logstep = math.log(6.4) / 27.0 + + if freq >= min_log_hz: + mels = min_log_mel + math.log(freq / min_log_hz) / logstep + + return mels + + +def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor: + """Convert mel bin numbers to frequencies. + + Args: + mels (Tensor): Mel frequencies + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + + Returns: + freqs (Tensor): Mels converted in Hz + """ + + if mel_scale not in ['slaney', 'htk']: + raise ValueError('mel_scale should be one of "htk" or "slaney".') + + if mel_scale == "htk": + return 700.0 * (10.0**(mels / 2595.0) - 1.0) + + # Fill in the linear scale + f_min = 0.0 + f_sp = 200.0 / 3 + freqs = f_min + f_sp * mels + + # And now the nonlinear scale + min_log_hz = 1000.0 + min_log_mel = (min_log_hz - f_min) / f_sp + logstep = math.log(6.4) / 27.0 + + log_t = (mels >= min_log_mel) + freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel)) + + return freqs + + +
[docs]def create_fb_matrix( + n_freqs: int, + f_min: float, + f_max: float, + n_mels: int, + sample_rate: int, + norm: Optional[str] = None, + mel_scale: str = "htk", +) -> Tensor: + r"""Create a frequency bin conversion matrix. + + Args: + n_freqs (int): Number of frequencies to highlight/apply + f_min (float): Minimum frequency (Hz) + f_max (float): Maximum frequency (Hz) + n_mels (int): Number of mel filterbanks + sample_rate (int): Sample rate of the audio waveform + norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band + (area normalization). (Default: ``None``) + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + + Returns: + Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``) + meaning number of frequencies to highlight/apply to x the number of filterbanks. + Each column is a filterbank so that assuming there is a matrix A of + size (..., ``n_freqs``), the applied result would be + ``A * create_fb_matrix(A.size(-1), ...)``. + """ + + if norm is not None and norm != "slaney": + raise ValueError("norm must be one of None or 'slaney'") + + # freq bins + # Equivalent filterbank construction by Librosa + all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) + + # calculate mel freq bins + m_min = _hz_to_mel(f_min, mel_scale=mel_scale) + m_max = _hz_to_mel(f_max, mel_scale=mel_scale) + + m_pts = torch.linspace(m_min, m_max, n_mels + 2) + f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) + + # calculate the difference between each mel point and each stft freq point in hertz + f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1) + slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2) + # create overlapping triangles + zero = torch.zeros(1) + down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels) + up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels) + fb = torch.max(zero, torch.min(down_slopes, up_slopes)) + + if norm is not None and norm == "slaney": + # Slaney-style mel is scaled to be approx constant energy per channel + enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels]) + fb *= enorm.unsqueeze(0) + + if (fb.max(dim=0).values == 0.).any(): + warnings.warn( + "At least one mel filterbank has all zero values. " + f"The value for `n_mels` ({n_mels}) may be set too high. " + f"Or, the value for `n_freqs` ({n_freqs}) may be set too low." + ) + + return fb
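A sketch of turning a linear-frequency power spectrogram into a mel spectrogram with the returned filter bank, which is roughly what torchaudio.transforms.MelScale does internally:

>>> import torch
>>> import torchaudio.functional as F
>>> n_fft, sample_rate = 400, 16000
>>> spec = torch.rand(1, n_fft // 2 + 1, 100)   # (channel, freq, time)
>>> fb = F.create_fb_matrix(n_freqs=n_fft // 2 + 1, f_min=0.0,
...                         f_max=sample_rate / 2.0, n_mels=40,
...                         sample_rate=sample_rate)
>>> mel_spec = torch.matmul(spec.transpose(-1, -2), fb).transpose(-1, -2)
>>> mel_spec.shape                              # (channel, n_mels, time)
torch.Size([1, 40, 100])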
+ + +
[docs]def create_dct( + n_mfcc: int, + n_mels: int, + norm: Optional[str] +) -> Tensor: + r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``), + normalized depending on norm. + + Args: + n_mfcc (int): Number of mfc coefficients to retain + n_mels (int): Number of mel filterbanks + norm (str or None): Norm to use (either 'ortho' or None) + + Returns: + Tensor: The transformation matrix, to be right-multiplied to + row-wise data of size (``n_mels``, ``n_mfcc``). + """ + # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II + n = torch.arange(float(n_mels)) + k = torch.arange(float(n_mfcc)).unsqueeze(1) + dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels) + if norm is None: + dct *= 2.0 + else: + assert norm == "ortho" + dct[0] *= 1.0 / math.sqrt(2.0) + dct *= math.sqrt(2.0 / float(n_mels)) + return dct.t()
+ + +
[docs]def mu_law_encoding( + x: Tensor, + quantization_channels: int +) -> Tensor: + r"""Encode signal based on mu-law companding. For more info see the + `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ + + This algorithm assumes the signal has been scaled to between -1 and 1 and + returns a signal encoded with values from 0 to quantization_channels - 1. + + Args: + x (Tensor): Input tensor + quantization_channels (int): Number of channels + + Returns: + Tensor: Input after mu-law encoding + """ + mu = quantization_channels - 1.0 + if not x.is_floating_point(): + x = x.to(torch.float) + mu = torch.tensor(mu, dtype=x.dtype) + x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) + x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64) + return x_mu
+ + +
[docs]def mu_law_decoding( + x_mu: Tensor, + quantization_channels: int +) -> Tensor: + r"""Decode mu-law encoded signal. For more info see the + `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ + + This expects an input with values between 0 and quantization_channels - 1 + and returns a signal scaled between -1 and 1. + + Args: + x_mu (Tensor): Input tensor + quantization_channels (int): Number of channels + + Returns: + Tensor: Input after mu-law decoding + """ + mu = quantization_channels - 1.0 + if not x_mu.is_floating_point(): + x_mu = x_mu.to(torch.float) + mu = torch.tensor(mu, dtype=x_mu.dtype) + x = ((x_mu) / mu) * 2 - 1.0 + x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu + return x
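A round-trip sketch of the two companding functions above with 256 quantization channels; decoding recovers the input only up to quantization error:

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 1000) * 2 - 1      # already scaled to [-1, 1]
>>> encoded = F.mu_law_encoding(waveform, quantization_channels=256)   # int64 in [0, 255]
>>> decoded = F.mu_law_decoding(encoded, quantization_channels=256)
>>> (waveform - decoded).abs().max() < 0.05     # small quantization error remains
tensor(True)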
+ + +
[docs]@_mod_utils.deprecated( + "Please convert the input Tensor to complex type with `torch.view_as_complex` then " + "use `torch.abs`. " + "Please refer to https://github.com/pytorch/audio/issues/1337 " + "for more details about torchaudio's plan to migrate to native complex type." +) +def complex_norm( + complex_tensor: Tensor, + power: float = 1.0 +) -> Tensor: + r"""Compute the norm of complex tensor input. + + Args: + complex_tensor (Tensor): Tensor shape of `(..., complex=2)` + power (float): Power of the norm. (Default: `1.0`). + + Returns: + Tensor: Power of the normed input tensor. Shape of `(..., )` + """ + + # Replace by torch.norm once issue is fixed + # https://github.com/pytorch/pytorch/issues/34279 + return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)
+ + +
[docs]@_mod_utils.deprecated( + "Please convert the input Tensor to complex type with `torch.view_as_complex` then " + "use `torch.angle`. " + "Please refer to https://github.com/pytorch/audio/issues/1337 " + "for more details about torchaudio's plan to migrate to native complex type." +) +def angle( + complex_tensor: Tensor +) -> Tensor: + r"""Compute the angle of complex tensor input. + + Args: + complex_tensor (Tensor): Tensor shape of `(..., complex=2)` + + Return: + Tensor: Angle of a complex tensor. Shape of `(..., )` + """ + return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])
+ + +
[docs]@_mod_utils.deprecated( + "Please convert the input Tensor to complex type with `torch.view_as_complex` then " + "use `torch.abs` and `torch.angle`. " + "Please refer to https://github.com/pytorch/audio/issues/1337 " + "for more details about torchaudio's plan to migrate to native complex type." +) +def magphase( + complex_tensor: Tensor, + power: float = 1.0 +) -> Tuple[Tensor, Tensor]: + r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase. + + Args: + complex_tensor (Tensor): Tensor shape of `(..., complex=2)` + power (float): Power of the norm. (Default: `1.0`) + + Returns: + (Tensor, Tensor): The magnitude and phase of the complex tensor + """ + mag = complex_norm(complex_tensor, power) + phase = angle(complex_tensor) + return mag, phase
+ + +
[docs]def phase_vocoder( + complex_specgrams: Tensor, + rate: float, + phase_advance: Tensor +) -> Tensor: + r"""Given a STFT tensor, speed up in time without modifying pitch by a + factor of ``rate``. + + Args: + complex_specgrams (Tensor): + Either a real tensor of dimension of ``(..., freq, num_frame, complex=2)`` + or a tensor of dimension ``(..., freq, num_frame)`` with complex dtype. + rate (float): Speed-up factor + phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1) + + Returns: + Tensor: + Stretched spectrogram. The resulting tensor is of the same dtype as the input + spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``. + + Example - With Tensor of complex dtype + >>> freq, hop_length = 1025, 512 + >>> # (channel, freq, time) + >>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat) + >>> rate = 1.3 # Speed up by 30% + >>> phase_advance = torch.linspace( + >>> 0, math.pi * hop_length, freq)[..., None] + >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) + >>> x.shape # with 231 == ceil(300 / 1.3) + torch.Size([2, 1025, 231]) + + Example - With Tensor of real dtype and extra dimension for complex field + >>> freq, hop_length = 1025, 512 + >>> # (channel, freq, time, complex=2) + >>> complex_specgrams = torch.randn(2, freq, 300, 2) + >>> rate = 1.3 # Speed up by 30% + >>> phase_advance = torch.linspace( + >>> 0, math.pi * hop_length, freq)[..., None] + >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) + >>> x.shape # with 231 == ceil(300 / 1.3) + torch.Size([2, 1025, 231, 2]) + """ + if rate == 1.0: + return complex_specgrams + + if not complex_specgrams.is_complex(): + warnings.warn( + "The use of pseudo complex type in `torchaudio.functional.phase_vocoder` and " + "`torchaudio.transforms.TimeStretch` is now deprecated." + "Please migrate to native complex type by converting the input tensor with " + "`torch.view_as_complex`. " + "Please refer to https://github.com/pytorch/audio/issues/1337 " + "for more details about torchaudio's plan to migrate to native complex type." + ) + if complex_specgrams.size(-1) != 2: + raise ValueError( + "complex_specgrams must be either native complex tensors or " + "real valued tensors with shape (..., 2)") + + is_complex = complex_specgrams.is_complex() + + if not is_complex: + complex_specgrams = torch.view_as_complex(complex_specgrams) + + # pack batch + shape = complex_specgrams.size() + complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-2:])) + + # Figures out the corresponding real dtype, i.e. complex128 -> float64, complex64 -> float32 + # Note torch.real is a view so it does not incur any memory copy. 
+ real_dtype = torch.real(complex_specgrams).dtype + time_steps = torch.arange( + 0, + complex_specgrams.size(-1), + rate, + device=complex_specgrams.device, + dtype=real_dtype) + + alphas = time_steps % 1.0 + phase_0 = complex_specgrams[..., :1].angle() + + # Time Padding + complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 2]) + + # (new_bins, freq, 2) + complex_specgrams_0 = complex_specgrams.index_select(-1, time_steps.long()) + complex_specgrams_1 = complex_specgrams.index_select(-1, (time_steps + 1).long()) + + angle_0 = complex_specgrams_0.angle() + angle_1 = complex_specgrams_1.angle() + + norm_0 = complex_specgrams_0.abs() + norm_1 = complex_specgrams_1.abs() + + phase = angle_1 - angle_0 - phase_advance + phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi)) + + # Compute Phase Accum + phase = phase + phase_advance + phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) + phase_acc = torch.cumsum(phase, -1) + + mag = alphas * norm_1 + (1 - alphas) * norm_0 + + complex_specgrams_stretch = torch.polar(mag, phase_acc) + + # unpack batch + complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-2] + complex_specgrams_stretch.shape[1:]) + + if not is_complex: + return torch.view_as_real(complex_specgrams_stretch) + return complex_specgrams_stretch
+ + +
[docs]def mask_along_axis_iid( + specgrams: Tensor, + mask_param: int, + mask_value: float, + axis: int +) -> Tensor: + r""" + Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where + ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. + + Args: + specgrams (Tensor): Real spectrograms (batch, channel, freq, time) + mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param] + mask_value (float): Value to assign to the masked columns + axis (int): Axis to apply masking on (2 -> frequency, 3 -> time) + + Returns: + Tensor: Masked spectrograms of dimensions (batch, channel, freq, time) + """ + + if axis != 2 and axis != 3: + raise ValueError('Only Frequency and Time masking are supported') + + device = specgrams.device + dtype = specgrams.dtype + + value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param + min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value) + + # Create broadcastable mask + mask_start = min_value[..., None, None] + mask_end = (min_value + value)[..., None, None] + mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) + + # Per batch example masking + specgrams = specgrams.transpose(axis, -1) + specgrams = specgrams.masked_fill((mask >= mask_start) & (mask < mask_end), mask_value) + specgrams = specgrams.transpose(axis, -1) + + return specgrams
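A short usage sketch (added for illustration; the shapes and mask_param value are assumptions): applying independent time masks to each example and channel in a batch.

import torch
import torchaudio.functional as F

# Illustrative sketch: each (batch, channel) pair receives its own random time mask.
specgrams = torch.randn(4, 2, 128, 400)      # (batch, channel, freq, time), random data
masked = F.mask_along_axis_iid(specgrams, mask_param=80, mask_value=0.0, axis=3)
# Up to 80 consecutive time steps are zeroed out independently per example/channel.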
+ + +
[docs]def mask_along_axis( + specgram: Tensor, + mask_param: int, + mask_value: float, + axis: int +) -> Tensor: + r""" + Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where + ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. + All examples will have the same mask interval. + + Args: + specgram (Tensor): Real spectrogram (channel, freq, time) + mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param] + mask_value (float): Value to assign to the masked columns + axis (int): Axis to apply masking on (1 -> frequency, 2 -> time) + + Returns: + Tensor: Masked spectrogram of dimensions (channel, freq, time) + """ + if axis != 1 and axis != 2: + raise ValueError('Only Frequency and Time masking are supported') + + # pack batch + shape = specgram.size() + specgram = specgram.reshape([-1] + list(shape[-2:])) + value = torch.rand(1) * mask_param + min_value = torch.rand(1) * (specgram.size(axis) - value) + + mask_start = (min_value.long()).squeeze() + mask_end = (min_value.long() + value.long()).squeeze() + mask = torch.arange(0, specgram.shape[axis], device=specgram.device, dtype=specgram.dtype) + mask = (mask >= mask_start) & (mask < mask_end) + if axis == 1: + mask = mask.unsqueeze(-1) + + assert mask_end - mask_start < mask_param + + specgram = specgram.masked_fill(mask, mask_value) + + # unpack batch + specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) + + return specgram
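A corresponding single-example sketch (illustrative values only): combining one frequency mask and one time mask, as in SpecAugment-style augmentation.

import torch
import torchaudio.functional as F

# Illustrative sketch: SpecAugment-style masking on a single spectrogram.
specgram = torch.randn(1, 128, 400)          # (channel, freq, time), random data
freq_masked = F.mask_along_axis(specgram, mask_param=27, mask_value=0.0, axis=1)
time_masked = F.mask_along_axis(freq_masked, mask_param=100, mask_value=0.0, axis=2)
# Unlike mask_along_axis_iid, the same mask interval is shared within each call.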
+ + +
[docs]def compute_deltas( + specgram: Tensor, + win_length: int = 5, + mode: str = "replicate" +) -> Tensor: + r"""Compute delta coefficients of a tensor, usually a spectrogram: + + .. math:: + d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2} + + where :math:`d_t` is the deltas at time :math:`t`, + :math:`c_t` is the spectrogram coefficients at time :math:`t`, + :math:`N` is ``(win_length-1)//2``. + + Args: + specgram (Tensor): Tensor of audio of dimension (..., freq, time) + win_length (int, optional): The window length used for computing delta (Default: ``5``) + mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``) + + Returns: + Tensor: Tensor of deltas of dimension (..., freq, time) + + Example + >>> specgram = torch.randn(1, 40, 1000) + >>> delta = compute_deltas(specgram) + >>> delta2 = compute_deltas(delta) + """ + device = specgram.device + dtype = specgram.dtype + + # pack batch + shape = specgram.size() + specgram = specgram.reshape(1, -1, shape[-1]) + + assert win_length >= 3 + + n = (win_length - 1) // 2 + + # twice sum of integer squared + denom = n * (n + 1) * (2 * n + 1) / 3 + + specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode) + + kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) + + output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom + + # unpack batch + output = output.reshape(shape) + + return output
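A usage sketch (added for illustration; the file path "speech.wav" and the n_mfcc value are assumptions): stacking MFCCs with their first- and second-order deltas, a common speech front end.

import torch
import torchaudio
import torchaudio.functional as F

# Illustrative sketch: MFCC + delta + delta-delta features.
waveform, sample_rate = torchaudio.load("speech.wav")        # placeholder path
mfcc = torchaudio.transforms.MFCC(sample_rate=sample_rate, n_mfcc=13)(waveform)
delta = F.compute_deltas(mfcc)
delta2 = F.compute_deltas(delta)
features = torch.cat([mfcc, delta, delta2], dim=-2)          # (..., 39, time)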
+ + +def _compute_nccf( + waveform: Tensor, + sample_rate: int, + frame_time: float, + freq_low: int +) -> Tensor: + r""" + Compute Normalized Cross-Correlation Function (NCCF). + + .. math:: + \phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}}, + + where + :math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`, + :math:`w` is the waveform, + :math:`N` is the length of a frame, + :math:`b_i` is the beginning of frame :math:`i`, + :math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`. + """ + + EPSILON = 10 ** (-9) + + # Number of lags to check + lags = int(math.ceil(sample_rate / freq_low)) + + frame_size = int(math.ceil(sample_rate * frame_time)) + + waveform_length = waveform.size()[-1] + num_of_frames = int(math.ceil(waveform_length / frame_size)) + + p = lags + num_of_frames * frame_size - waveform_length + waveform = torch.nn.functional.pad(waveform, (0, p)) + + # Compute lags + output_lag = [] + for lag in range(1, lags + 1): + s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] + s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] + + output_frames = ( + (s1 * s2).sum(-1) + / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) + / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) + ) + + output_lag.append(output_frames.unsqueeze(-1)) + + nccf = torch.cat(output_lag, -1) + + return nccf + + +def _combine_max( + a: Tuple[Tensor, Tensor], + b: Tuple[Tensor, Tensor], + thresh: float = 0.99 +) -> Tuple[Tensor, Tensor]: + """ + Take value from first if bigger than a multiplicative factor of the second, elementwise. + """ + mask = (a[0] > thresh * b[0]) + values = mask * a[0] + ~mask * b[0] + indices = mask * a[1] + ~mask * b[1] + return values, indices + + +def _find_max_per_frame( + nccf: Tensor, + sample_rate: int, + freq_high: int +) -> Tensor: + r""" + For each frame, take the highest value of NCCF, + apply centered median smoothing, and convert to frequency. + + Note: If the max among all the lags is very close + to the first half of lags, then the latter is taken. + """ + + lag_min = int(math.ceil(sample_rate / freq_high)) + + # Find near enough max that is smallest + + best = torch.max(nccf[..., lag_min:], -1) + + half_size = nccf.shape[-1] // 2 + half = torch.max(nccf[..., lag_min:half_size], -1) + + best = _combine_max(half, best) + indices = best[1] + + # Add back minimal lag + indices += lag_min + # Add 1 empirical calibration offset + indices += 1 + + return indices + + +def _median_smoothing( + indices: Tensor, + win_length: int +) -> Tensor: + r""" + Apply median smoothing to the 1D tensor over the given window. + """ + + # Centered windowed + pad_length = (win_length - 1) // 2 + + # "replicate" padding in any dimension + indices = torch.nn.functional.pad( + indices, (pad_length, 0), mode="constant", value=0. + ) + + indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) + roll = indices.unfold(-1, win_length, 1) + + values, _ = torch.median(roll, -1) + return values + + +
[docs]def detect_pitch_frequency( + waveform: Tensor, + sample_rate: int, + frame_time: float = 10 ** (-2), + win_length: int = 30, + freq_low: int = 85, + freq_high: int = 3400, +) -> Tensor: + r"""Detect pitch frequency. + + It is implemented using normalized cross-correlation function and median smoothing. + + Args: + waveform (Tensor): Tensor of audio of dimension (..., time) + sample_rate (int): The sample rate of the waveform (Hz) + frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``). + win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``). + freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``). + freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``). + + Returns: + Tensor: Tensor of freq of dimension (..., frame) + """ + # pack batch + shape = list(waveform.size()) + waveform = waveform.reshape([-1] + shape[-1:]) + + nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low) + indices = _find_max_per_frame(nccf, sample_rate, freq_high) + indices = _median_smoothing(indices, win_length) + + # Convert indices to frequency + EPSILON = 10 ** (-9) + freq = sample_rate / (EPSILON + indices.to(torch.float)) + + # unpack batch + freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) + + return freq
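A usage sketch (illustrative; the file path is a placeholder): estimating a pitch contour in Hz.

import torchaudio
import torchaudio.functional as F

# Illustrative sketch: pitch contour of a recording.
waveform, sample_rate = torchaudio.load("speech.wav")        # placeholder path
pitch = F.detect_pitch_frequency(waveform, sample_rate)      # (..., frame), values in Hz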
+ + +
[docs]def sliding_window_cmn( + specgram: Tensor, + cmn_window: int = 600, + min_cmn_window: int = 100, + center: bool = False, + norm_vars: bool = False, +) -> Tensor: + r""" + Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. + + Args: + specgram (Tensor): Tensor of audio of dimension (..., time, freq) + cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600) + min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start). + Only applicable if center == false, ignored if center==true (int, default = 100) + center (bool, optional): If true, use a window centered on the current frame + (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false) + norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false) + + Returns: + Tensor: Tensor matching input shape (..., freq, time) + """ + input_shape = specgram.shape + num_frames, num_feats = input_shape[-2:] + specgram = specgram.view(-1, num_frames, num_feats) + num_channels = specgram.shape[0] + + dtype = specgram.dtype + device = specgram.device + last_window_start = last_window_end = -1 + cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) + cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) + cmn_specgram = torch.zeros( + num_channels, num_frames, num_feats, dtype=dtype, device=device) + for t in range(num_frames): + window_start = 0 + window_end = 0 + if center: + window_start = t - cmn_window // 2 + window_end = window_start + cmn_window + else: + window_start = t - cmn_window + window_end = t + 1 + if window_start < 0: + window_end -= window_start + window_start = 0 + if not center: + if window_end > t: + window_end = max(t + 1, min_cmn_window) + if window_end > num_frames: + window_start -= (window_end - num_frames) + window_end = num_frames + if window_start < 0: + window_start = 0 + if last_window_start == -1: + input_part = specgram[:, window_start: window_end - window_start, :] + cur_sum += torch.sum(input_part, 1) + if norm_vars: + cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :] + else: + if window_start > last_window_start: + frame_to_remove = specgram[:, last_window_start, :] + cur_sum -= frame_to_remove + if norm_vars: + cur_sumsq -= (frame_to_remove ** 2) + if window_end > last_window_end: + frame_to_add = specgram[:, last_window_end, :] + cur_sum += frame_to_add + if norm_vars: + cur_sumsq += (frame_to_add ** 2) + window_frames = window_end - window_start + last_window_start = window_start + last_window_end = window_end + cmn_specgram[:, t, :] = specgram[:, t, :] - cur_sum / window_frames + if norm_vars: + if window_frames == 1: + cmn_specgram[:, t, :] = torch.zeros( + num_channels, num_feats, dtype=dtype, device=device) + else: + variance = cur_sumsq + variance = variance / window_frames + variance -= ((cur_sum ** 2) / (window_frames ** 2)) + variance = torch.pow(variance, -0.5) + cmn_specgram[:, t, :] *= variance + + cmn_specgram = cmn_specgram.view(input_shape[:-2] + (num_frames, num_feats)) + if len(input_shape) == 2: + cmn_specgram = cmn_specgram.squeeze(0) + return cmn_specgram
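A usage sketch (illustrative shapes and values; note the (..., time, freq) layout this function operates on):

import torch
import torchaudio.functional as F

# Illustrative sketch: sliding-window cepstral mean normalization of Kaldi-style features.
feats = torch.randn(1, 500, 40)              # (channel, time, freq), random data
normalized = F.sliding_window_cmn(feats, cmn_window=600, center=True, norm_vars=False)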
+ + +
[docs]def spectral_centroid( + waveform: Tensor, + sample_rate: int, + pad: int, + window: Tensor, + n_fft: int, + hop_length: int, + win_length: int, +) -> Tensor: + r""" + Compute the spectral centroid for each channel along the time axis. + + The spectral centroid is defined as the weighted average of the + frequency values, weighted by their magnitude. + + Args: + waveform (Tensor): Tensor of audio of dimension (..., time) + sample_rate (int): Sample rate of the audio waveform + pad (int): Two sided padding of signal + window (Tensor): Window tensor that is applied/multiplied to each frame/window + n_fft (int): Size of FFT + hop_length (int): Length of hop between STFT windows + win_length (int): Window size + + Returns: + Tensor: Dimension (..., time) + """ + specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, + win_length=win_length, power=1., normalized=False) + freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2, + device=specgram.device).reshape((-1, 1)) + freq_dim = -2 + return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
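A usage sketch (illustrative parameter values and random data): tracking the spectral centroid of a signal over time.

import torch
import torchaudio.functional as F

# Illustrative sketch: spectral centroid in Hz, one value per STFT frame.
sample_rate = 16000
waveform = torch.randn(1, sample_rate)       # one second of noise, random data
n_fft, hop_length, win_length = 1024, 512, 1024
window = torch.hann_window(win_length)
centroid = F.spectral_centroid(
    waveform, sample_rate, pad=0, window=window,
    n_fft=n_fft, hop_length=hop_length, win_length=win_length)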
+ + +
[docs]@_mod_utils.requires_sox() +def apply_codec( + waveform: Tensor, + sample_rate: int, + format: str, + channels_first: bool = True, + compression: Optional[float] = None, + encoding: Optional[str] = None, + bits_per_sample: Optional[int] = None, +) -> Tensor: + r""" + Apply codecs as a form of augmentation. + + Args: + waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```. + sample_rate (int): Sample rate of the audio waveform. + format (str): File format. + channels_first (bool): + When True, both the input and output Tensor have dimension ``[channel, time]``. + Otherwise, they have dimension ``[time, channel]``. + compression (float): Used for formats other than WAV. + For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. + encoding (str, optional): Changes the encoding for the supported formats. + For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. + bits_per_sample (int, optional): Changes the bit depth for the supported formats. + For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. + + Returns: + torch.Tensor: Resulting Tensor. + If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``. + """ + bytes = io.BytesIO() + torchaudio.backend.sox_io_backend.save(bytes, + waveform, + sample_rate, + channels_first, + compression, + format, + encoding, + bits_per_sample + ) + bytes.seek(0) + augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( + bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format) + return augmented
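A usage sketch (illustrative; requires the sox backend, and the file path is a placeholder): simulating MP3 compression as an augmentation.

import torchaudio
import torchaudio.functional as F

# Illustrative sketch: pass a waveform through an MP3 round trip.
waveform, sample_rate = torchaudio.load("speech.wav")        # placeholder path
degraded = F.apply_codec(waveform, sample_rate, format="mp3")
# "degraded" keeps the input layout but carries lossy-coding artifacts;
# its length may differ slightly because of encoder padding.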
+ + +
[docs]@_mod_utils.requires_kaldi() +def compute_kaldi_pitch( + waveform: torch.Tensor, + sample_rate: float, + frame_length: float = 25.0, + frame_shift: float = 10.0, + min_f0: float = 50, + max_f0: float = 400, + soft_min_f0: float = 10.0, + penalty_factor: float = 0.1, + lowpass_cutoff: float = 1000, + resample_frequency: float = 4000, + delta_pitch: float = 0.005, + nccf_ballast: float = 7000, + lowpass_filter_width: int = 1, + upsample_filter_width: int = 5, + max_frames_latency: int = 0, + frames_per_chunk: int = 0, + simulate_first_pass_online: bool = False, + recompute_frame: int = 500, + snip_edges: bool = True, +) -> torch.Tensor: + """Extract pitch based on method described in *A pitch extraction algorithm tuned + for automatic speech recognition* [:footcite:`6854049`]. + + This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi. + + Args: + waveform (Tensor): + The input waveform of shape `(..., time)`. + sample_rate (float): + Sample rate of `waveform`. + frame_length (float, optional): + Frame length in milliseconds. (default: 25.0) + frame_shift (float, optional): + Frame shift in milliseconds. (default: 10.0) + min_f0 (float, optional): + Minimum F0 to search for (Hz) (default: 50.0) + max_f0 (float, optional): + Maximum F0 to search for (Hz) (default: 400.0) + soft_min_f0 (float, optional): + Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0) + penalty_factor (float, optional): + Cost factor for FO change. (default: 0.1) + lowpass_cutoff (float, optional): + Cutoff frequency for LowPass filter (Hz) (default: 1000) + resample_frequency (float, optional): + Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff. + (default: 4000) + delta_pitch( float, optional): + Smallest relative change in pitch that our algorithm measures. (default: 0.005) + nccf_ballast (float, optional): + Increasing this factor reduces NCCF for quiet frames (default: 7000) + lowpass_filter_width (int, optional): + Integer that determines filter width of lowpass filter, more gives sharper filter. + (default: 1) + upsample_filter_width (int, optional): + Integer that determines filter width when upsampling NCCF. (default: 5) + max_frames_latency (int, optional): + Maximum number of frames of latency that we allow pitch tracking to introduce into + the feature processing (affects output only if ``frames_per_chunk > 0`` and + ``simulate_first_pass_online=True``) (default: 0) + frames_per_chunk (int, optional): + The number of frames used for energy normalization. (default: 0) + simulate_first_pass_online (bool, optional): + If true, the function will output features that correspond to what an online decoder + would see in the first pass of decoding -- not the final version of the features, + which is the default. (default: False) + Relevant if ``frames_per_chunk > 0``. + recompute_frame (int, optional): + Only relevant for compatibility with online pitch extraction. + A non-critical parameter; the frame at which we recompute some of the forward pointers, + after revising our estimate of the signal energy. + Relevant if ``frames_per_chunk > 0``. (default: 500) + snip_edges (bool, optional): + If this is set to false, the incomplete frames near the ending edge won't be snipped, + so that the number of frames is the file size divided by the frame-shift. + This makes different types of features give the same number of frames. (default: True) + + Returns: + Tensor: Pitch feature. 
Shape: ``(batch, frames, 2)`` where the last dimension + corresponds to pitch and NCCF. + """ + shape = waveform.shape + waveform = waveform.reshape(-1, shape[-1]) + result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( + waveform, sample_rate, frame_length, frame_shift, + min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, + resample_frequency, delta_pitch, nccf_ballast, + lowpass_filter_width, upsample_filter_width, max_frames_latency, + frames_per_chunk, simulate_first_pass_online, recompute_frame, + snip_edges, + ) + result = result.reshape(shape[:-1] + result.shape[-2:]) + return result
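A usage sketch (illustrative; requires a torchaudio build with Kaldi integration, and the file path is a placeholder):

import torchaudio
import torchaudio.functional as F

# Illustrative sketch: Kaldi-compatible pitch features.
waveform, sample_rate = torchaudio.load("speech.wav")          # placeholder path
pitch_feature = F.compute_kaldi_pitch(waveform, sample_rate)   # (..., frames, 2)
# The last dimension packs the pitch and NCCF values described in the docstring above.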
+ + +def _get_sinc_resample_kernel( + orig_freq: float, + new_freq: float, + gcd: int, + lowpass_filter_width: int, + rolloff: float, + resampling_method: str, + beta: Optional[float], + device: torch.device = torch.device("cpu"), + dtype: Optional[torch.dtype] = None): + + if not (int(orig_freq) == orig_freq and int(new_freq) == new_freq): + warnings.warn( + "Non-integer frequencies are being cast to ints and may result in poor resampling quality " + "because the underlying algorithm requires an integer ratio between `orig_freq` and `new_freq`. " + "Using non-integer valued frequencies will throw an error in release 0.10. " + "To work around this issue, manually convert both frequencies to integer values " + "that maintain their resampling rate ratio before passing them into the function " + "Example: To downsample a 44100 hz waveform by a factor of 8, use " + "`orig_freq=8` and `new_freq=1` instead of `orig_freq=44100` and `new_freq=5512.5` " + "For more information or to leave feedback about this change, please refer to " + "https://github.com/pytorch/audio/issues/1487." + ) + + if resampling_method not in ['sinc_interpolation', 'kaiser_window']: + raise ValueError('Invalid resampling method: {}'.format(resampling_method)) + + orig_freq = int(orig_freq) // gcd + new_freq = int(new_freq) // gcd + + assert lowpass_filter_width > 0 + kernels = [] + base_freq = min(orig_freq, new_freq) + # This will perform antialiasing filtering by removing the highest frequencies. + # At first I thought I only needed this when downsampling, but when upsampling + # you will get edge artifacts without this, as the edge is equivalent to zero padding, + # which will add high freq artifacts. + base_freq *= rolloff + + # The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor) + # using the sinc interpolation formula: + # x(t) = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - t)) + # We can then sample the function x(t) with a different sample rate: + # y[j] = x(j / new_freq) + # or, + # y[j] = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - j / new_freq)) + + # We see here that y[j] is the convolution of x[i] with a specific filter, for which + # we take an FIR approximation, stopping when we see at least `lowpass_filter_width` zeros crossing. + # But y[j+1] is going to have a different set of weights and so on, until y[j + new_freq]. + # Indeed: + # y[j + new_freq] = sum_i x[i] sinc(pi * orig_freq * ((i / orig_freq - (j + new_freq) / new_freq)) + # = sum_i x[i] sinc(pi * orig_freq * ((i - orig_freq) / orig_freq - j / new_freq)) + # = sum_i x[i + orig_freq] sinc(pi * orig_freq * (i / orig_freq - j / new_freq)) + # so y[j+new_freq] uses the same filter as y[j], but on a shifted version of x by `orig_freq`. + # This will explain the F.conv1d after, with a stride of orig_freq. + width = math.ceil(lowpass_filter_width * orig_freq / base_freq) + # If orig_freq is still big after GCD reduction, most filters will be very unbalanced, i.e., + # they will have a lot of almost zero values to the left or to the right... + # There is probably a way to evaluate those filters more efficiently, but this is kept for + # future work. 
+ idx_dtype = dtype if dtype is not None else torch.float64 + idx = torch.arange(-width, width + orig_freq, device=device, dtype=idx_dtype) + + for i in range(new_freq): + t = (-i / new_freq + idx / orig_freq) * base_freq + t = t.clamp_(-lowpass_filter_width, lowpass_filter_width) + + # we do not use built in torch windows here as we need to evaluate the window + # at specific positions, not over a regular grid. + if resampling_method == "sinc_interpolation": + window = torch.cos(t * math.pi / lowpass_filter_width / 2)**2 + else: + # kaiser_window + if beta is None: + beta = 14.769656459379492 + beta_tensor = torch.tensor(float(beta)) + window = torch.i0(beta_tensor * torch.sqrt(1 - (t / lowpass_filter_width) ** 2)) / torch.i0(beta_tensor) + t *= math.pi + kernel = torch.where(t == 0, torch.tensor(1.).to(t), torch.sin(t) / t) + kernel.mul_(window) + kernels.append(kernel) + + scale = base_freq / orig_freq + kernels = torch.stack(kernels).view(new_freq, 1, -1).mul_(scale) + if dtype is None: + kernels = kernels.to(dtype=torch.float32) + return kernels, width + + +def _apply_sinc_resample_kernel( + waveform: Tensor, + orig_freq: float, + new_freq: float, + gcd: int, + kernel: Tensor, + width: int, +): + orig_freq = int(orig_freq) // gcd + new_freq = int(new_freq) // gcd + + # pack batch + shape = waveform.size() + waveform = waveform.view(-1, shape[-1]) + + num_wavs, length = waveform.shape + waveform = torch.nn.functional.pad(waveform, (width, width + orig_freq)) + resampled = torch.nn.functional.conv1d(waveform[:, None], kernel, stride=orig_freq) + resampled = resampled.transpose(1, 2).reshape(num_wavs, -1) + target_length = int(math.ceil(new_freq * length / orig_freq)) + resampled = resampled[..., :target_length] + + # unpack batch + resampled = resampled.view(shape[:-1] + resampled.shape[-1:]) + return resampled + + +
[docs]def resample( + waveform: Tensor, + orig_freq: float, + new_freq: float, + lowpass_filter_width: int = 6, + rolloff: float = 0.99, + resampling_method: str = "sinc_interpolation", + beta: Optional[float] = None, +) -> Tensor: + r"""Resamples the waveform at the new frequency using bandlimited interpolation. + + https://ccrma.stanford.edu/~jos/resample/Theory_Ideal_Bandlimited_Interpolation.html + + Note: + ``transforms.Resample`` precomputes and reuses the resampling kernel, so using it will result in + more efficient computation if resampling multiple waveforms with the same resampling parameters. + + Args: + waveform (Tensor): The input signal of dimension (..., time) + orig_freq (float): The original frequency of the signal + new_freq (float): The desired frequency + lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper + but less efficient. (Default: ``6``) + rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist. + Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``) + resampling_method (str, optional): The resampling method to use. + Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``) + beta (float or None): The shape parameter used for kaiser window. + + Returns: + Tensor: The waveform at the new frequency of dimension (..., time). + """ + + assert orig_freq > 0.0 and new_freq > 0.0 + + if orig_freq == new_freq: + return waveform + + gcd = math.gcd(int(orig_freq), int(new_freq)) + + kernel, width = _get_sinc_resample_kernel(orig_freq, new_freq, gcd, lowpass_filter_width, rolloff, + resampling_method, beta, waveform.device, waveform.dtype) + resampled = _apply_sinc_resample_kernel(waveform, orig_freq, new_freq, gcd, kernel, width) + return resampled
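A usage sketch (illustrative rates and random data): downsampling from 44.1 kHz to 16 kHz, with the equivalent transform-based API for repeated conversions.

import torch
import torchaudio
import torchaudio.functional as F

# Illustrative sketch: functional resampling of a single waveform.
waveform = torch.randn(1, 44100)             # one second of audio, random data
resampled = F.resample(waveform, orig_freq=44100, new_freq=16000)   # (1, 16000)

# For many conversions with the same rates, the transform caches the kernel.
resampler = torchaudio.transforms.Resample(orig_freq=44100, new_freq=16000)
resampled = resampler(waveform)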
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2018, Torchaudio Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/kaldi_io.html b/0.9.0/_modules/torchaudio/kaldi_io.html new file mode 100644 index 0000000000..18729b4294 --- /dev/null +++ b/0.9.0/_modules/torchaudio/kaldi_io.html @@ -0,0 +1,753 @@ + + + + + + + + + + + + torchaudio.kaldi_io — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +

Source code for torchaudio.kaldi_io

+# To use this file, the dependency (https://github.com/vesis84/kaldi-io-for-python)
+# needs to be installed. This is a light wrapper around kaldi_io that returns
+# torch.Tensors.
+from typing import Any, Callable, Iterable, Tuple
+
+import torch
+from torch import Tensor
+from torchaudio._internal import module_utils as _mod_utils
+
+if _mod_utils.is_module_available('kaldi_io', 'numpy'):
+    import numpy as np
+    import kaldi_io
+
+
+__all__ = [
+    'read_vec_int_ark',
+    'read_vec_flt_scp',
+    'read_vec_flt_ark',
+    'read_mat_scp',
+    'read_mat_ark',
+]
+
+
+def _convert_method_output_to_tensor(file_or_fd: Any,
+                                     fn: Callable,
+                                     convert_contiguous: bool = False) -> Iterable[Tuple[str, Tensor]]:
+    r"""Takes a method invokes it. The output is converted to a tensor.
+
+    Args:
+        file_or_fd (str/FileDescriptor): File name or file descriptor
+        fn (Callable): Function that has the signature (file name/descriptor) and converts it to
+            Iterable[Tuple[str, Tensor]].
+        convert_contiguous (bool, optional): Determines whether the array should be converted into a
+            contiguous layout. (Default: ``False``)
+
+    Returns:
+        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is vec/mat
+    """
+    for key, np_arr in fn(file_or_fd):
+        if convert_contiguous:
+            np_arr = np.ascontiguousarray(np_arr)
+        yield key, torch.from_numpy(np_arr)
+
+
+
[docs]@_mod_utils.requires_module('kaldi_io', 'numpy') +def read_vec_int_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,vector<int>) tuples, which reads from the ark file/stream. + + Args: + file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file + + Example + >>> # read ark to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_int_ark(file) } + """ + # Requires convert_contiguous to be True because elements from int32 vector are + # sorted in tuples: (sizeof(int32), value) so strides are (5,) instead of (4,) which will throw an error + # in from_numpy as it expects strides to be a multiple of 4 (int32). + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_int_ark, convert_contiguous=True)
+ + +
[docs]@_mod_utils.requires_module('kaldi_io', 'numpy') +def read_vec_flt_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,vector<float32/float64>) tuples, read according to Kaldi scp. + + Args: + file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file + + Example + >>> # read scp to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_scp(file) } + """ + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_scp)
+ + +
[docs]@_mod_utils.requires_module('kaldi_io', 'numpy') +def read_vec_flt_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,vector<float32/float64>) tuples, which reads from the ark file/stream. + + Args: + file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file + + Example + >>> # read ark to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_ark(file) } + """ + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_ark)
+ + +
[docs]@_mod_utils.requires_module('kaldi_io', 'numpy') +def read_mat_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,matrix<float32/float64>) tuples, read according to Kaldi scp. + + Args: + file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file + + Example + >>> # read scp to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_scp(file) } + """ + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_scp)
+ + +
[docs]@_mod_utils.requires_module('kaldi_io', 'numpy') +def read_mat_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,matrix<float32/float64>) tuples, which reads from the ark file/stream. + + Args: + file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file + + Example + >>> # read ark to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_ark(file) } + """ + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_ark)
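A usage sketch (illustrative; requires the optional kaldi_io and numpy packages, and the file paths are placeholders): reading Kaldi matrices and integer vectors into dictionaries of Tensors.

import torchaudio

# Illustrative sketch: load Kaldi features and alignments as torch Tensors.
feats = {key: mat for key, mat in torchaudio.kaldi_io.read_mat_scp("feats.scp")}
alignments = {key: vec for key, vec in torchaudio.kaldi_io.read_vec_int_ark("ali.ark")}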
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2018, Torchaudio Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/models/conv_tasnet.html b/0.9.0/_modules/torchaudio/models/conv_tasnet.html new file mode 100644 index 0000000000..5bad20cf2d --- /dev/null +++ b/0.9.0/_modules/torchaudio/models/conv_tasnet.html @@ -0,0 +1,933 @@ + + + + + + + + + + + + torchaudio.models.conv_tasnet — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +

Source code for torchaudio.models.conv_tasnet

+"""Implements Conv-TasNet with building blocks of it.
+
+Based on https://github.com/naplab/Conv-TasNet/tree/e66d82a8f956a69749ec8a4ae382217faa097c5c
+"""
+
+from typing import Tuple, Optional
+
+import torch
+
+
+class ConvBlock(torch.nn.Module):
+    """1D Convolutional block.
+
+    Args:
+        io_channels (int): The number of input/output channels, <B, Sc>
+        hidden_channels (int): The number of channels in the internal layers, <H>.
+        kernel_size (int): The convolution kernel size of the middle layer, <P>.
+        padding (int): Padding value of the convolution in the middle layer.
+        dilation (int): Dilation value of the convolution in the middle layer.
+        no_residual (bool): Disable residual block/output.
+
+    Note:
+        This implementation corresponds to the "non-causal" setting in the paper.
+    """
+
+    def __init__(
+        self,
+        io_channels: int,
+        hidden_channels: int,
+        kernel_size: int,
+        padding: int,
+        dilation: int = 1,
+        no_residual: bool = False,
+    ):
+        super().__init__()
+
+        self.conv_layers = torch.nn.Sequential(
+            torch.nn.Conv1d(
+                in_channels=io_channels, out_channels=hidden_channels, kernel_size=1
+            ),
+            torch.nn.PReLU(),
+            torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08),
+            torch.nn.Conv1d(
+                in_channels=hidden_channels,
+                out_channels=hidden_channels,
+                kernel_size=kernel_size,
+                padding=padding,
+                dilation=dilation,
+                groups=hidden_channels,
+            ),
+            torch.nn.PReLU(),
+            torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08),
+        )
+
+        self.res_out = (
+            None
+            if no_residual
+            else torch.nn.Conv1d(
+                in_channels=hidden_channels, out_channels=io_channels, kernel_size=1
+            )
+        )
+        self.skip_out = torch.nn.Conv1d(
+            in_channels=hidden_channels, out_channels=io_channels, kernel_size=1
+        )
+
+    def forward(
+        self, input: torch.Tensor
+    ) -> Tuple[Optional[torch.Tensor], torch.Tensor]:
+        feature = self.conv_layers(input)
+        if self.res_out is None:
+            residual = None
+        else:
+            residual = self.res_out(feature)
+        skip_out = self.skip_out(feature)
+        return residual, skip_out
+
+
+class MaskGenerator(torch.nn.Module):
+    """TCN (Temporal Convolution Network) Separation Module
+
+    Generates masks for separation.
+
+    Args:
+        input_dim (int): Input feature dimension, <N>.
+        num_sources (int): The number of sources to separate.
+        kernel_size (int): The convolution kernel size of conv blocks, <P>.
+        num_feats (int): Input/output feature dimension of conv blocks, <B, Sc>.
+        num_hidden (int): Intermediate feature dimension of conv blocks, <H>.
+        num_layers (int): The number of conv blocks in one stack, <X>.
+        num_stacks (int): The number of conv block stacks, <R>.
+
+    Note:
+        This implementation corresponds to the "non-causal" setting in the paper.
+    """
+
+    def __init__(
+        self,
+        input_dim: int,
+        num_sources: int,
+        kernel_size: int,
+        num_feats: int,
+        num_hidden: int,
+        num_layers: int,
+        num_stacks: int,
+    ):
+        super().__init__()
+
+        self.input_dim = input_dim
+        self.num_sources = num_sources
+
+        self.input_norm = torch.nn.GroupNorm(
+            num_groups=1, num_channels=input_dim, eps=1e-8
+        )
+        self.input_conv = torch.nn.Conv1d(
+            in_channels=input_dim, out_channels=num_feats, kernel_size=1
+        )
+
+        self.receptive_field = 0
+        self.conv_layers = torch.nn.ModuleList([])
+        for s in range(num_stacks):
+            for l in range(num_layers):
+                multi = 2 ** l
+                self.conv_layers.append(
+                    ConvBlock(
+                        io_channels=num_feats,
+                        hidden_channels=num_hidden,
+                        kernel_size=kernel_size,
+                        dilation=multi,
+                        padding=multi,
+                        # The last ConvBlock does not need residual
+                        no_residual=(l == (num_layers - 1) and s == (num_stacks - 1)),
+                    )
+                )
+                self.receptive_field += (
+                    kernel_size if s == 0 and l == 0 else (kernel_size - 1) * multi
+                )
+        self.output_prelu = torch.nn.PReLU()
+        self.output_conv = torch.nn.Conv1d(
+            in_channels=num_feats, out_channels=input_dim * num_sources, kernel_size=1,
+        )
+
+    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        """Generate separation mask.
+
+        Args:
+            input (torch.Tensor): 3D Tensor with shape [batch, features, frames]
+
+        Returns:
+            torch.Tensor: shape [batch, num_sources, features, frames]
+        """
+        batch_size = input.shape[0]
+        feats = self.input_norm(input)
+        feats = self.input_conv(feats)
+        output = 0.0
+        for layer in self.conv_layers:
+            residual, skip = layer(feats)
+            if residual is not None:  # the last conv layer does not produce residual
+                feats = feats + residual
+            output = output + skip
+        output = self.output_prelu(output)
+        output = self.output_conv(output)
+        output = torch.sigmoid(output)
+        return output.view(batch_size, self.num_sources, self.input_dim, -1)
+
+
+
[docs]class ConvTasNet(torch.nn.Module): + """Conv-TasNet: a fully-convolutional time-domain audio separation network + *Conv-TasNet: Surpassing Ideal Time–Frequency Magnitude Masking for Speech Separation* + [:footcite:`Luo_2019`]. + + Args: + num_sources (int): The number of sources to split. + enc_kernel_size (int): The convolution kernel size of the encoder/decoder, <L>. + enc_num_feats (int): The feature dimensions passed to mask generator, <N>. + msk_kernel_size (int): The convolution kernel size of the mask generator, <P>. + msk_num_feats (int): The input/output feature dimension of conv block in the mask generator, <B, Sc>. + msk_num_hidden_feats (int): The internal feature dimension of conv block of the mask generator, <H>. + msk_num_layers (int): The number of layers in one conv block of the mask generator, <X>. + msk_num_stacks (int): The numbr of conv blocks of the mask generator, <R>. + + Note: + This implementation corresponds to the "non-causal" setting in the paper. + """ + + def __init__( + self, + num_sources: int = 2, + # encoder/decoder parameters + enc_kernel_size: int = 16, + enc_num_feats: int = 512, + # mask generator parameters + msk_kernel_size: int = 3, + msk_num_feats: int = 128, + msk_num_hidden_feats: int = 512, + msk_num_layers: int = 8, + msk_num_stacks: int = 3, + ): + super().__init__() + + self.num_sources = num_sources + self.enc_num_feats = enc_num_feats + self.enc_kernel_size = enc_kernel_size + self.enc_stride = enc_kernel_size // 2 + + self.encoder = torch.nn.Conv1d( + in_channels=1, + out_channels=enc_num_feats, + kernel_size=enc_kernel_size, + stride=self.enc_stride, + padding=self.enc_stride, + bias=False, + ) + self.mask_generator = MaskGenerator( + input_dim=enc_num_feats, + num_sources=num_sources, + kernel_size=msk_kernel_size, + num_feats=msk_num_feats, + num_hidden=msk_num_hidden_feats, + num_layers=msk_num_layers, + num_stacks=msk_num_stacks, + ) + self.decoder = torch.nn.ConvTranspose1d( + in_channels=enc_num_feats, + out_channels=1, + kernel_size=enc_kernel_size, + stride=self.enc_stride, + padding=self.enc_stride, + bias=False, + ) + + def _align_num_frames_with_strides( + self, input: torch.Tensor + ) -> Tuple[torch.Tensor, int]: + """Pad input Tensor so that the end of the input tensor corresponds with + + 1. (if kernel size is odd) the center of the last convolution kernel + or 2. (if kernel size is even) the end of the first half of the last convolution kernel + + Assumption: + The resulting Tensor will be padded with the size of stride (== kernel_width // 2) + on the both ends in Conv1D + + |<--- k_1 --->| + | | |<-- k_n-1 -->| + | | | |<--- k_n --->| + | | | | | + | | | | | + | v v v | + |<---->|<--- input signal --->|<--->|<---->| + stride PAD stride + + Args: + input (torch.Tensor): 3D Tensor with shape (batch_size, channels==1, frames) + + Returns: + torch.Tensor: Padded Tensor + int: Number of paddings performed + """ + batch_size, num_channels, num_frames = input.shape + is_odd = self.enc_kernel_size % 2 + num_strides = (num_frames - is_odd) // self.enc_stride + num_remainings = num_frames - (is_odd + num_strides * self.enc_stride) + if num_remainings == 0: + return input, 0 + + num_paddings = self.enc_stride - num_remainings + pad = torch.zeros( + batch_size, + num_channels, + num_paddings, + dtype=input.dtype, + device=input.device, + ) + return torch.cat([input, pad], 2), num_paddings + +
[docs] def forward(self, input: torch.Tensor) -> torch.Tensor: + """Perform source separation. Generate audio source waveforms. + + Args: + input (torch.Tensor): 3D Tensor with shape [batch, channel==1, frames] + + Returns: + torch.Tensor: 3D Tensor with shape [batch, channel==num_sources, frames] + """ + if input.ndim != 3 or input.shape[1] != 1: + raise ValueError( + f"Expected 3D tensor (batch, channel==1, frames). Found: {input.shape}" + ) + + # B: batch size + # L: input frame length + # L': padded input frame length + # F: feature dimension + # M: feature frame length + # S: number of sources + + padded, num_pads = self._align_num_frames_with_strides(input) # B, 1, L' + batch_size, num_padded_frames = padded.shape[0], padded.shape[2] + feats = self.encoder(padded) # B, F, M + masked = self.mask_generator(feats) * feats.unsqueeze(1) # B, S, F, M + masked = masked.view( + batch_size * self.num_sources, self.enc_num_feats, -1 + ) # B*S, F, M + decoded = self.decoder(masked) # B*S, 1, L' + output = decoded.view( + batch_size, self.num_sources, num_padded_frames + ) # B, S, L' + if num_pads > 0: + output = output[..., :-num_pads] # B, S, L + return output
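A forward-pass sketch (added for illustration; the model below is randomly initialized, real use would load trained weights via load_state_dict):

import torch
from torchaudio.models import ConvTasNet

# Illustrative sketch: separate a two-speaker mixture with an untrained model.
model = ConvTasNet(num_sources=2)
mixture = torch.randn(1, 1, 32000)           # (batch, channel == 1, frames), random data
separated = model(mixture)                   # (batch, num_sources == 2, frames)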
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2018, Torchaudio Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/models/deepspeech.html b/0.9.0/_modules/torchaudio/models/deepspeech.html new file mode 100644 index 0000000000..e76f7fddb4 --- /dev/null +++ b/0.9.0/_modules/torchaudio/models/deepspeech.html @@ -0,0 +1,714 @@ + + + + + + + + + + + + torchaudio.models.deepspeech — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +

Source code for torchaudio.models.deepspeech

+import torch
+
+__all__ = ["DeepSpeech"]
+
+
+class FullyConnected(torch.nn.Module):
+    """
+    Args:
+        n_feature: Number of input features
+        n_hidden: Internal hidden unit size.
+    """
+
+    def __init__(self,
+                 n_feature: int,
+                 n_hidden: int,
+                 dropout: float,
+                 relu_max_clip: int = 20) -> None:
+        super(FullyConnected, self).__init__()
+        self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True)
+        self.relu_max_clip = relu_max_clip
+        self.dropout = dropout
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.fc(x)
+        x = torch.nn.functional.relu(x)
+        x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip)
+        if self.dropout:
+            x = torch.nn.functional.dropout(x, self.dropout, self.training)
+        return x
+
+
+
[docs]class DeepSpeech(torch.nn.Module): + """ + DeepSpeech model architecture from *Deep Speech: Scaling up end-to-end speech recognition* + [:footcite:`hannun2014deep`]. + + Args: + n_feature: Number of input features + n_hidden: Internal hidden unit size. + n_class: Number of output classes + """ + + def __init__( + self, + n_feature: int, + n_hidden: int = 2048, + n_class: int = 40, + dropout: float = 0.0, + ) -> None: + super(DeepSpeech, self).__init__() + self.n_hidden = n_hidden + self.fc1 = FullyConnected(n_feature, n_hidden, dropout) + self.fc2 = FullyConnected(n_hidden, n_hidden, dropout) + self.fc3 = FullyConnected(n_hidden, n_hidden, dropout) + self.bi_rnn = torch.nn.RNN( + n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True + ) + self.fc4 = FullyConnected(n_hidden, n_hidden, dropout) + self.out = torch.nn.Linear(n_hidden, n_class) + +
[docs] def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x (torch.Tensor): Tensor of dimension (batch, channel, time, feature). + Returns: + Tensor: Predictor tensor of dimension (batch, time, class). + """ + # N x C x T x F + x = self.fc1(x) + # N x C x T x H + x = self.fc2(x) + # N x C x T x H + x = self.fc3(x) + # N x C x T x H + x = x.squeeze(1) + # N x T x H + x = x.transpose(0, 1) + # T x N x H + x, _ = self.bi_rnn(x) + # The fifth (non-recurrent) layer takes both the forward and backward units as inputs + x = x[:, :, :self.n_hidden] + x[:, :, self.n_hidden:] + # T x N x H + x = self.fc4(x) + # T x N x H + x = self.out(x) + # T x N x n_class + x = x.permute(1, 0, 2) + # N x T x n_class + x = torch.nn.functional.log_softmax(x, dim=2) + # N x T x n_class + return x
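A forward-pass sketch (illustrative shapes and class count; the weights are random):

import torch
from torchaudio.models import DeepSpeech

# Illustrative sketch: run the acoustic model on a batch of spectrogram features.
model = DeepSpeech(n_feature=40, n_hidden=2048, n_class=29)
features = torch.randn(8, 1, 100, 40)        # (batch, channel, time, feature), random data
log_probs = model(features)                  # (batch, time, class), log-softmax outputs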
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2018, Torchaudio Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/models/wav2letter.html b/0.9.0/_modules/torchaudio/models/wav2letter.html new file mode 100644 index 0000000000..36fc6da77d --- /dev/null +++ b/0.9.0/_modules/torchaudio/models/wav2letter.html @@ -0,0 +1,697 @@ + + + + + + + + + + + + torchaudio.models.wav2letter — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +

Source code for torchaudio.models.wav2letter

+from torch import Tensor
+from torch import nn
+
+__all__ = [
+    "Wav2Letter",
+]
+
+
+
[docs]class Wav2Letter(nn.Module): + r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech + Recognition System* [:footcite:`collobert2016wav2letter`]. + + :math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}` + + Args: + num_classes (int, optional): Number of classes to be classified. (Default: ``40``) + input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum`` + or ``mfcc`` (Default: ``waveform``). + num_features (int, optional): Number of input features that the network will receive (Default: ``1``). + """ + + def __init__(self, num_classes: int = 40, + input_type: str = "waveform", + num_features: int = 1) -> None: + super(Wav2Letter, self).__init__() + + acoustic_num_features = 250 if input_type == "waveform" else num_features + acoustic_model = nn.Sequential( + nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0), + nn.ReLU(inplace=True) + ) + + if input_type == "waveform": + waveform_model = nn.Sequential( + nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45), + nn.ReLU(inplace=True) + ) + self.acoustic_model = nn.Sequential(waveform_model, acoustic_model) + + if input_type in ["power_spectrum", "mfcc"]: + self.acoustic_model = acoustic_model + +
[docs] def forward(self, x: Tensor) -> Tensor: + r""" + Args: + x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length). + + Returns: + Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length). + """ + + x = self.acoustic_model(x) + x = nn.functional.log_softmax(x, dim=1) + return x
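A forward-pass sketch (illustrative; random weights and random input):

import torch
from torchaudio.models import Wav2Letter

# Illustrative sketch: raw-waveform Wav2Letter forward pass.
model = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
waveform = torch.randn(4, 1, 16000)          # (batch, num_features, input_length), random data
log_probs = model(waveform)                  # (batch, num_classes, output_length)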
+
+ +
+ +
+
+ + + + +
+ + + +
+

+ © Copyright 2018, Torchaudio Contributors. + +

+
+ +
+ Built with Sphinx using a theme provided by Read the Docs. +
+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/models/wav2vec2/model.html b/0.9.0/_modules/torchaudio/models/wav2vec2/model.html new file mode 100644 index 0000000000..94e88b79af --- /dev/null +++ b/0.9.0/_modules/torchaudio/models/wav2vec2/model.html @@ -0,0 +1,870 @@ + + + + + + + + + + + + torchaudio.models.wav2vec2.model — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +

Source code for torchaudio.models.wav2vec2.model

+from typing import Optional, Tuple, List
+
+from torch import Tensor
+from torch.nn import Module
+
+from . import components
+
+
+
[docs]class Wav2Vec2Model(Module): + """Encoder model used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]. + + Note: + To build the model, please use one of the factory functions. + + Args: + feature_extractor (torch.nn.Module): + Feature extractor that extracts feature vectors from raw audio Tensor. + + encoder (torch.nn.Module): + Encoder that converts the audio features into the sequence of probability + distribution (in negative log-likelihood) over labels. + """ + def __init__( + self, + feature_extractor: Module, + encoder: Module, + ): + super().__init__() + self.feature_extractor = feature_extractor + self.encoder = encoder + +
[docs] def extract_features( + self, + waveforms: Tensor, + lengths: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + """Extract feature vectors from raw waveforms + + Args: + waveforms (Tensor): Audio tensor of shape ``(batch, frames)``. + lengths (Tensor, optional): + Indicates the valid length of each audio sample in the batch. + Shape: ``(batch, )``. + + Returns: + Tensor: + Feature vectors. + Shape: ``(batch, frames, feature dimension)`` + Tensor, optional: + Indicates the valid length of each feature in the batch, computed + based on the given ``lengths`` argument. + Shape: ``(batch, )``. + """ + return self.feature_extractor(waveforms, lengths)
+ +
[docs] def forward( + self, + waveforms: Tensor, + lengths: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + """Compute the sequence of probability distribution over labels. + + Args: + waveforms (Tensor): Audio tensor of shape ``(batch, frames)``. + lengths (Tensor, optional): + Indicates the valid length of each audio sample in the batch. + Shape: ``(batch, )``. + + Returns: + Tensor: + The sequences of probability distribution (in logit) over labels. + Shape: ``(batch, frames, num labels)``. + Tensor, optional: + Indicates the valid length of each feature in the batch, computed + based on the given ``lengths`` argument. + Shape: ``(batch, )``. + """ + x, lengths = self.feature_extractor(waveforms, lengths) + return self.encoder(x, lengths), lengths
+ + +def _get_model( + extractor_mode: str, + extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], + extractor_conv_bias: bool, + encoder_embed_dim: int, + encoder_projection_dropout: float, + encoder_pos_conv_kernel: int, + encoder_pos_conv_groups: int, + encoder_num_layers: int, + encoder_num_heads: int, + encoder_attention_dropout: float, + encoder_ff_interm_features: int, + encoder_ff_interm_dropout: float, + encoder_dropout: float, + encoder_layer_norm_first: bool, + encoder_layer_drop: float, + encoder_num_out: int, +) -> Wav2Vec2Model: + if extractor_conv_layer_config is None: + extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2 + + feature_extractor = components._get_feature_extractor( + extractor_mode, extractor_conv_layer_config, extractor_conv_bias) + encoder = components._get_encoder( + in_features=extractor_conv_layer_config[-1][0], + embed_dim=encoder_embed_dim, + dropout_input=encoder_projection_dropout, + pos_conv_kernel=encoder_pos_conv_kernel, + pos_conv_groups=encoder_pos_conv_groups, + num_layers=encoder_num_layers, + num_heads=encoder_num_heads, + attention_dropout=encoder_attention_dropout, + ff_interm_features=encoder_ff_interm_features, + ff_interm_dropout=encoder_ff_interm_dropout, + dropout=encoder_dropout, + layer_norm_first=encoder_layer_norm_first, + layer_drop=encoder_layer_drop, + num_out=encoder_num_out, + ) + return Wav2Vec2Model(feature_extractor, encoder) + + +
[docs]def wav2vec2_base(num_out: int) -> Wav2Vec2Model: + """Build wav2vec2.0 model with "Base" configuration from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]. + + Args: + num_out: int + The number of output labels. + + Returns: + Wav2Vec2Model: The resulting model. + + Example - Reload fine-tuned model from Hugging Face: + >>> # Session 1 - Convert pretrained model from Hugging Face and save the parameters. + >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model + >>> + >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") + >>> model = import_huggingface_model(original) + >>> torch.save(model.state_dict(), "wav2vec2-base-960h.pt") + >>> + >>> # Session 2 - Load model and the parameters + >>> model = wav2vec2_base(num_out=32) + >>> model.load_state_dict(torch.load("wav2vec2-base-960h.pt")) + """ + return _get_model( + extractor_mode="group_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=768, + encoder_projection_dropout=0.1, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=12, + encoder_num_heads=12, + encoder_attention_dropout=0.1, + encoder_ff_interm_features=3072, + encoder_ff_interm_dropout=0.1, + encoder_dropout=0.1, + encoder_layer_norm_first=False, + encoder_layer_drop=0.1, + encoder_num_out=num_out, + )
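Beyond the Hugging Face round trip shown in the docstring, a minimal forward-pass sketch (added for illustration; the model is untrained and num_out is an assumption):

import torch
from torchaudio.models import wav2vec2_base

# Illustrative sketch: run the "Base" encoder over a padded batch of waveforms.
model = wav2vec2_base(num_out=32)
waveforms = torch.randn(2, 16000)            # (batch, frames), random data
lengths = torch.tensor([16000, 12000])       # valid lengths per example
logits, out_lengths = model(waveforms, lengths)   # (batch, frames', num_out), (batch,)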
+ + +
[docs]def wav2vec2_large(num_out: int) -> Wav2Vec2Model: + """Build wav2vec2.0 model with "Large" configuration from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]. + + Args: + num_out: int + The number of output labels. + + Returns: + Wav2Vec2Model: The resulting model. + + Example - Reload fine-tuned model from Hugging Face: + >>> # Session 1 - Convert pretrained model from Hugging Face and save the parameters. + >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model + >>> + >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h") + >>> model = import_huggingface_model(original) + >>> torch.save(model.state_dict(), "wav2vec2-base-960h.pt") + >>> + >>> # Session 2 - Load model and the parameters + >>> model = wav2vec2_large(num_out=32) + >>> model.load_state_dict(torch.load("wav2vec2-base-960h.pt")) + """ + return _get_model( + extractor_mode="group_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=1024, + encoder_projection_dropout=0.1, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_attention_dropout=0.1, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=0.1, + encoder_dropout=0.1, + encoder_layer_norm_first=False, + encoder_layer_drop=0.1, + encoder_num_out=num_out, + )
+ + +
[docs]def wav2vec2_large_lv60k(num_out: int) -> Wav2Vec2Model: + """Build wav2vec2.0 model with "Large LV-60k" configuration from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]. + + Args: + num_out: int + The number of output labels. + + Returns: + Wav2Vec2Model: The resulting model. + + Example - Reload fine-tuned model from Hugging Face: + >>> # Session 1 - Convert pretrained model from Hugging Face and save the parameters. + >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model + >>> + >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self") + >>> model = import_huggingface_model(original) + >>> torch.save(model.state_dict(), "wav2vec2-base-960h.pt") + >>> + >>> # Session 2 - Load model and the parameters + >>> model = wav2vec2_large_lv60k(num_out=32) + >>> model.load_state_dict(torch.load("wav2vec2-base-960h.pt")) + """ + return _get_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=True, + encoder_embed_dim=1024, + encoder_projection_dropout=0.1, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_attention_dropout=0.0, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=0.1, + encoder_dropout=0.0, + encoder_layer_norm_first=True, + encoder_layer_drop=0.1, + encoder_num_out=num_out, + )
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/models/wav2vec2/utils/import_fairseq.html b/0.9.0/_modules/torchaudio/models/wav2vec2/utils/import_fairseq.html new file mode 100644 index 0000000000..d5a2a4a924 --- /dev/null +++ b/0.9.0/_modules/torchaudio/models/wav2vec2/utils/import_fairseq.html @@ -0,0 +1,828 @@ + + + + + + + + + + + + torchaudio.models.wav2vec2.utils.import_fairseq — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for torchaudio.models.wav2vec2.utils.import_fairseq

+"""Import fariseq's wav2vec2.0 pretrained weights to torchaudios's format.
+
+For this module to work, you need `fairseq`.
+"""
+import re
+from typing import Optional
+
+from torch.nn import Module
+
+from ..model import Wav2Vec2Model, _get_model
+
+
+def _parse_config(w2v_model, num_out):
+    encoder = w2v_model.encoder
+    conv_layers = w2v_model.feature_extractor.conv_layers
+
+    if 'GroupNorm' in conv_layers[0][2].__class__.__name__:
+        extractor_mode = 'group_norm'
+    else:
+        extractor_mode = 'layer_norm'
+
+    conv_layer_config = [(l[0].out_channels, l[0].kernel_size[0], l[0].stride[0]) for l in conv_layers]
+
+    if all(l[0].bias is None for l in conv_layers):
+        conv_bias = False
+    elif all(l[0].bias is not None for l in conv_layers):
+        conv_bias = True
+    else:
+        raise ValueError(
+            'Either all of the convolution layers should have a bias term, or none of them should.')
+
+    config = {
+        'extractor_mode': extractor_mode,
+        'extractor_conv_layer_config': conv_layer_config,
+        'extractor_conv_bias': conv_bias,
+        'encoder_embed_dim': w2v_model.post_extract_proj.out_features,
+        'encoder_projection_dropout': w2v_model.dropout_input.p,
+        'encoder_pos_conv_kernel': encoder.pos_conv[0].kernel_size[0],
+        'encoder_pos_conv_groups': encoder.pos_conv[0].groups,
+        'encoder_num_layers': len(encoder.layers),
+        'encoder_num_heads': encoder.layers[0].self_attn.num_heads,
+        'encoder_attention_dropout': encoder.layers[0].self_attn.dropout_module.p,
+        'encoder_ff_interm_features': encoder.layers[0].fc1.out_features,
+        'encoder_ff_interm_dropout': encoder.layers[0].dropout2.p,
+        'encoder_dropout': encoder.layers[0].dropout3.p,
+        'encoder_layer_norm_first': encoder.layer_norm_first,
+        'encoder_layer_drop': encoder.layerdrop,
+        'encoder_num_out': num_out,
+    }
+    return config
+
+
+def _map_key(key):
+    key_ = key
+    if key.startswith('w2v_model.'):
+        key = key.replace('w2v_model.', '')
+    if re.match(r'(mask_emb|quantizer|project_q|final_proj)', key):
+        return None
+    # Feature Extractor
+    # Group norm when "extractor_mode" is "default".
+    # (Only the first layer)
+    # "conv_layers.0.2.weight" -> "conv_layers.0.layer_norm.weight"
+    # "conv_layers.0.2.bias"   -> "conv_layers.0.layer_norm.bias"
+    match = re.match(r'feature_extractor\.conv_layers\.0\.2\.(weight|bias)', key)
+    if match:
+        return f"feature_extractor.conv_layers.0.layer_norm.{match.group(1)}"
+    # Convolutions
+    # "conv_layers.X.0.weight" -> "conv_layers.X.conv.weight"
+    # "conv_layers.X.0.bias"   -> "conv_layers.X.conv.bias"
+    match = re.match(r'feature_extractor\.conv_layers\.(\d+)\.0\.(weight|bias)', key)
+    if match:
+        return f"feature_extractor.conv_layers.{match.group(1)}.conv.{match.group(2)}"
+    # Layer norm when "extractor_mode" is "layer_norm".
+    # "conv_layers.X.2.1.weight" -> "conv_layers.X.layer_norm.weight"
+    # "conv_layers.X.2.1.bias"   -> "conv_layers.X.layer_norm.bias"
+    match = re.match(r'feature_extractor\.conv_layers\.(\d+)\.2\.1\.(weight|bias)', key)
+    if match:
+        return f"feature_extractor.conv_layers.{match.group(1)}.layer_norm.{match.group(2)}"
+    match = re.match(r"post_extract_proj\.(weight|bias)", key)
+    # Encoder - Feature projection
+    if match:
+        return f"encoder.feature_projection.projection.{match.group(1)}"
+    match = re.match(r"layer_norm\.(weight|bias)", key)
+    if match:
+        return f"encoder.feature_projection.layer_norm.{match.group(1)}"
+    # Encoder - Transformer - Convolutional positional embedding
+    match = re.match(r"encoder\.pos_conv\.0\.(bias|weight_g|weight_v)", key)
+    if match:
+        return f"encoder.transformer.pos_conv_embed.conv.{match.group(1)}"
+    match = re.match(r"encoder\.layer_norm\.(weight|bias)", key)
+    if match:
+        return f"encoder.transformer.layer_norm.{match.group(1)}"
+    # Encoder - Transformer - Self attention layers
+    match = re.match(r"encoder\.layers\.(\d+)\.self_attn\.((k_|v_|q_|out_)proj\.(weight|bias))", key)
+    if match:
+        return f"encoder.transformer.layers.{match.group(1)}.attention.{match.group(2)}"
+    match = re.match(r"encoder\.layers\.(\d+)\.self_attn_layer_norm\.(weight|bias)", key)
+    if match:
+        return f"encoder.transformer.layers.{match.group(1)}.layer_norm.{match.group(2)}"
+    match = re.match(r"encoder\.layers\.(\d+)\.fc1\.(weight|bias)", key)
+    if match:
+        return f"encoder.transformer.layers.{match.group(1)}.feed_forward.intermediate_dense.{match.group(2)}"
+    match = re.match(r"encoder\.layers\.(\d+)\.fc2\.(weight|bias)", key)
+    if match:
+        return f"encoder.transformer.layers.{match.group(1)}.feed_forward.output_dense.{match.group(2)}"
+    match = re.match(r"encoder\.layers\.(\d+)\.final_layer_norm\.(weight|bias)", key)
+    if match:
+        return f"encoder.transformer.layers.{match.group(1)}.final_layer_norm.{match.group(2)}"
+    match = re.match(r"proj\.(weight|bias)", key)
+    # Encoder - Readout layer
+    if match:
+        return f"encoder.readout.{match.group(1)}"
+    raise ValueError(f'Unexpected key: {key_}')
+
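# Editor's note (not part of the library source): a few concrete mappings produced
# by ``_map_key`` above, derived directly from the regular expressions it applies.
#
#   _map_key('w2v_model.feature_extractor.conv_layers.0.0.weight')
#       -> 'feature_extractor.conv_layers.0.conv.weight'
#   _map_key('w2v_model.encoder.layers.3.fc1.bias')
#       -> 'encoder.transformer.layers.3.feed_forward.intermediate_dense.bias'
#   _map_key('w2v_model.quantizer.vars')
#       -> None   # quantization weights are dropped by _convert_state_dict
#   _map_key('proj.weight')
#       -> 'encoder.readout.weight'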
+
+def _convert_state_dict(state_dict):
+    converted = {}
+    for k, v in state_dict.items():
+        k = _map_key(k)
+        if k is not None:
+            converted[k] = v
+    return converted
+
+
+
[docs]def import_fairseq_model( + original: Module, + num_out: Optional[int] = None) -> Wav2Vec2Model: + """Build Wav2Vec2Model from pretrained parameters published by `fairseq`_. + + Args: + original (torch.nn.Module): + An instance of fairseq's Wav2Vec2.0 model class. + Either ``fairseq.models.wav2vec.wav2vec2_asr.Wav2VecEncoder`` or + ``fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model``. + num_out (int, optional): + The number of output labels. Required only when the original model is + an instance of ``fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model``. + + Returns: + Wav2Vec2Model: Imported model. + + Example - Loading pretrain-only model + >>> from torchaudio.models.wav2vec2.utils import import_fairseq_model + >>> + >>> # Load model using fairseq + >>> model_file = 'wav2vec_small.pt' + >>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file]) + >>> original = model[0] + >>> imported = import_fairseq_model(original, num_out=28) + >>> + >>> # Perform feature extraction + >>> waveform, _ = torchaudio.load('audio.wav') + >>> features, _ = imported.extract_features(waveform) + >>> + >>> # Compare result with the original model from fairseq + >>> reference = original.feature_extractor(waveform).transpose(1, 2) + >>> torch.testing.assert_allclose(features, reference) + + Example - Fine-tuned model + >>> from torchaudio.models.wav2vec2.utils import import_fairseq_model + >>> + >>> # Load model using fairseq + >>> model_file = 'wav2vec_small_960h.pt' + >>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file]) + >>> original = model[0] + >>> imported = import_fairseq_model(original.w2v_encoder) + >>> + >>> # Perform encoding + >>> waveform, _ = torchaudio.load('audio.wav') + >>> emission, _ = imported(waveform) + >>> + >>> # Compare result with the original model from fairseq + >>> mask = torch.zeros_like(waveform) + >>> reference = original(waveform, mask)['encoder_out'].transpose(0, 1) + >>> torch.testing.assert_allclose(emission, reference) + + .. _fairseq: https://github.com/pytorch/fairseq + """ + class_ = original.__class__.__name__ + if class_ == 'Wav2Vec2Model': + if num_out is None: + raise ValueError( + 'When importing a pretrained model without readout layer, ' + '`num_out` argument must be given.' + ) + return _import_pretrained(original, num_out) + if class_ == 'Wav2VecEncoder': + return _import_finetuned(original) + raise ValueError( + f'Expected an instance of `Wav2Vec2Model` or `Wav2VecEncoder`. Found: {class_}')
+ + +def _import_finetuned(original: Module) -> Wav2Vec2Model: + config = _parse_config(original.w2v_model, original.proj.out_features) + model = _get_model(**config) + model.load_state_dict(_convert_state_dict(original.state_dict())) + return model + + +def _import_pretrained(original: Module, num_out: int) -> Wav2Vec2Model: + config = _parse_config(original, num_out) + model = _get_model(**config) + model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False) + return model +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/models/wav2vec2/utils/import_huggingface.html b/0.9.0/_modules/torchaudio/models/wav2vec2/utils/import_huggingface.html new file mode 100644 index 0000000000..a5fcc28385 --- /dev/null +++ b/0.9.0/_modules/torchaudio/models/wav2vec2/utils/import_huggingface.html @@ -0,0 +1,694 @@ + + + + + + + + + + + + torchaudio.models.wav2vec2.utils.import_huggingface — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchaudio.models.wav2vec2.utils.import_huggingface

+"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format.
+"""
+import logging
+
+from torch.nn import Module
+
+from ..model import Wav2Vec2Model, _get_model
+
+_LG = logging.getLogger(__name__)
+
+
+def _get_config(cfg):
+    config = {
+        'extractor_mode': f'{cfg.feat_extract_norm}_norm',
+        'extractor_conv_layer_config': list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)),
+        'extractor_conv_bias': cfg.conv_bias,
+        'encoder_embed_dim': cfg.hidden_size,
+        'encoder_projection_dropout': cfg.feat_proj_dropout,
+        'encoder_pos_conv_kernel': cfg.num_conv_pos_embeddings,
+        'encoder_pos_conv_groups': cfg.num_conv_pos_embedding_groups,
+        'encoder_num_layers': cfg.num_hidden_layers,
+        'encoder_num_heads': cfg.num_attention_heads,
+        'encoder_attention_dropout': cfg.attention_dropout,
+        'encoder_ff_interm_features': cfg.intermediate_size,
+        'encoder_ff_interm_dropout': cfg.activation_dropout,
+        'encoder_dropout': cfg.hidden_dropout,
+        'encoder_layer_norm_first': cfg.do_stable_layer_norm,
+        'encoder_layer_drop': cfg.layerdrop,
+        'encoder_num_out': cfg.vocab_size,
+    }
+    return config
+
+
+def _build(config, original):
+    imported = _get_model(**config)
+    imported.feature_extractor.load_state_dict(original.wav2vec2.feature_extractor.state_dict())
+    imported.encoder.feature_projection.load_state_dict(original.wav2vec2.feature_projection.state_dict())
+    imported.encoder.transformer.load_state_dict(original.wav2vec2.encoder.state_dict())
+    imported.encoder.readout.load_state_dict(original.lm_head.state_dict())
+    return imported
+
+
+
[docs]def import_huggingface_model(original: Module) -> Wav2Vec2Model: + """Import wav2vec2 model from Hugging Face's `Transformers`_. + + Args: + original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``. + + Returns: + Wav2Vec2Model: Imported model. + + Example + >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model + >>> + >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") + >>> model = import_huggingface_model(original) + >>> + >>> waveforms, _ = torchaudio.load("audio.wav") + >>> logits, _ = model(waveforms) + + .. _Transformers: https://huggingface.co/transformers/ + """ + _LG.info('Importing model.') + if original.__class__.__name__ != 'Wav2Vec2ForCTC': + _LG.warning('The model is not an instance of Wav2Vec2ForCTC') + _LG.info('Loading model configuration.') + config = _get_config(original.config) + _LG.debug(' - config: %s', config) + _LG.info('Building model.') + imported = _build(config, original) + return imported
+
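# Editor's sketch (not part of the library source): the imported model is a plain
# ``torch.nn.Module``, so it can be scripted and saved like other torchaudio models.
# This assumes the imported checkpoint remains TorchScript-compatible; file names
# are placeholders.
#
#   import torch
#   from transformers import Wav2Vec2ForCTC
#   from torchaudio.models.wav2vec2.utils import import_huggingface_model
#
#   original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
#   imported = import_huggingface_model(original).eval()
#   scripted = torch.jit.script(imported)
#   scripted.save("wav2vec2_base_960h_scripted.pt")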
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/models/wavernn.html b/0.9.0/_modules/torchaudio/models/wavernn.html new file mode 100644 index 0000000000..2cd83910ac --- /dev/null +++ b/0.9.0/_modules/torchaudio/models/wavernn.html @@ -0,0 +1,949 @@ + + + + + + + + + + + + torchaudio.models.wavernn — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchaudio.models.wavernn

+from typing import List, Tuple
+
+import torch
+from torch import Tensor
+from torch import nn
+
+__all__ = [
+    "ResBlock",
+    "MelResNet",
+    "Stretch2d",
+    "UpsampleNetwork",
+    "WaveRNN",
+]
+
+
+class ResBlock(nn.Module):
+    r"""ResNet block based on *Efficient Neural Audio Synthesis* [:footcite:`kalchbrenner2018efficient`].
+
+    Args:
+        n_freq: the number of bins in a spectrogram. (Default: ``128``)
+
+    Examples
+        >>> resblock = ResBlock()
+        >>> input = torch.rand(10, 128, 512)  # a random spectrogram
+        >>> output = resblock(input)  # shape: (10, 128, 512)
+    """
+
+    def __init__(self, n_freq: int = 128) -> None:
+        super().__init__()
+
+        self.resblock_model = nn.Sequential(
+            nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False),
+            nn.BatchNorm1d(n_freq),
+            nn.ReLU(inplace=True),
+            nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False),
+            nn.BatchNorm1d(n_freq)
+        )
+
+    def forward(self, specgram: Tensor) -> Tensor:
+        r"""Pass the input through the ResBlock layer.
+        Args:
+            specgram (Tensor): the input sequence to the ResBlock layer (n_batch, n_freq, n_time).
+
+        Return:
+            Tensor shape: (n_batch, n_freq, n_time)
+        """
+
+        return self.resblock_model(specgram) + specgram
+
+
+class MelResNet(nn.Module):
+    r"""MelResNet layer uses a stack of ResBlocks on spectrogram.
+
+    Args:
+        n_res_block: the number of ResBlocks in the stack. (Default: ``10``)
+        n_freq: the number of bins in a spectrogram. (Default: ``128``)
+        n_hidden: the number of hidden dimensions of ResBlock. (Default: ``128``)
+        n_output: the number of output dimensions of MelResNet. (Default: ``128``)
+        kernel_size: the kernel size of the first Conv1d layer. (Default: ``5``)
+
+    Examples
+        >>> melresnet = MelResNet()
+        >>> input = torch.rand(10, 128, 512)  # a random spectrogram
+        >>> output = melresnet(input)  # shape: (10, 128, 508)
+    """
+
+    def __init__(self,
+                 n_res_block: int = 10,
+                 n_freq: int = 128,
+                 n_hidden: int = 128,
+                 n_output: int = 128,
+                 kernel_size: int = 5) -> None:
+        super().__init__()
+
+        ResBlocks = [ResBlock(n_hidden) for _ in range(n_res_block)]
+
+        self.melresnet_model = nn.Sequential(
+            nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False),
+            nn.BatchNorm1d(n_hidden),
+            nn.ReLU(inplace=True),
+            *ResBlocks,
+            nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1)
+        )
+
+    def forward(self, specgram: Tensor) -> Tensor:
+        r"""Pass the input through the MelResNet layer.
+        Args:
+            specgram (Tensor): the input sequence to the MelResNet layer (n_batch, n_freq, n_time).
+
+        Return:
+            Tensor shape: (n_batch, n_output, n_time - kernel_size + 1)
+        """
+
+        return self.melresnet_model(specgram)
+
+
+class Stretch2d(nn.Module):
+    r"""Upscale the frequency and time dimensions of a spectrogram.
+
+    Args:
+        time_scale: the scale factor in the time dimension
+        freq_scale: the scale factor in the frequency dimension
+
+    Examples
+        >>> stretch2d = Stretch2d(time_scale=10, freq_scale=5)
+
+        >>> input = torch.rand(10, 100, 512)  # a random spectrogram
+        >>> output = stretch2d(input)  # shape: (10, 500, 5120)
+    """
+
+    def __init__(self,
+                 time_scale: int,
+                 freq_scale: int) -> None:
+        super().__init__()
+
+        self.freq_scale = freq_scale
+        self.time_scale = time_scale
+
+    def forward(self, specgram: Tensor) -> Tensor:
+        r"""Pass the input through the Stretch2d layer.
+
+        Args:
+            specgram (Tensor): the input sequence to the Stretch2d layer (..., n_freq, n_time).
+
+        Return:
+            Tensor shape: (..., n_freq * freq_scale, n_time * time_scale)
+        """
+
+        return specgram.repeat_interleave(self.freq_scale, -2).repeat_interleave(self.time_scale, -1)
+
+
+class UpsampleNetwork(nn.Module):
+    r"""Upscale the dimensions of a spectrogram.
+
+    Args:
+        upsample_scales: the list of upsample scales.
+        n_res_block: the number of ResBlocks in the stack. (Default: ``10``)
+        n_freq: the number of bins in a spectrogram. (Default: ``128``)
+        n_hidden: the number of hidden dimensions of ResBlock. (Default: ``128``)
+        n_output: the number of output dimensions of MelResNet. (Default: ``128``)
+        kernel_size: the kernel size of the first Conv1d layer. (Default: ``5``)
+
+    Examples
+        >>> upsamplenetwork = UpsampleNetwork(upsample_scales=[4, 4, 16])
+        >>> input = torch.rand(10, 128, 10)  # a random spectrogram
+        >>> output = upsamplenetwork(input)  # shape: (10, 1536, 128), (10, 1536, 128)
+    """
+
+    def __init__(self,
+                 upsample_scales: List[int],
+                 n_res_block: int = 10,
+                 n_freq: int = 128,
+                 n_hidden: int = 128,
+                 n_output: int = 128,
+                 kernel_size: int = 5) -> None:
+        super().__init__()
+
+        total_scale = 1
+        for upsample_scale in upsample_scales:
+            total_scale *= upsample_scale
+
+        self.indent = (kernel_size - 1) // 2 * total_scale
+        self.resnet = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size)
+        self.resnet_stretch = Stretch2d(total_scale, 1)
+
+        up_layers = []
+        for scale in upsample_scales:
+            stretch = Stretch2d(scale, 1)
+            conv = nn.Conv2d(in_channels=1,
+                             out_channels=1,
+                             kernel_size=(1, scale * 2 + 1),
+                             padding=(0, scale),
+                             bias=False)
+            conv.weight.data.fill_(1. / (scale * 2 + 1))
+            up_layers.append(stretch)
+            up_layers.append(conv)
+        self.upsample_layers = nn.Sequential(*up_layers)
+
+    def forward(self, specgram: Tensor) -> Tuple[Tensor, Tensor]:
+        r"""Pass the input through the UpsampleNetwork layer.
+
+        Args:
+            specgram (Tensor): the input sequence to the UpsampleNetwork layer (n_batch, n_freq, n_time)
+
+        Return:
+            Tensor shape: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale),
+                          (n_batch, n_output, (n_time - kernel_size + 1) * total_scale)
+        where total_scale is the product of all elements in upsample_scales.
+        """
+
+        resnet_output = self.resnet(specgram).unsqueeze(1)
+        resnet_output = self.resnet_stretch(resnet_output)
+        resnet_output = resnet_output.squeeze(1)
+
+        specgram = specgram.unsqueeze(1)
+        upsampling_output = self.upsample_layers(specgram)
+        upsampling_output = upsampling_output.squeeze(1)[:, :, self.indent:-self.indent]
+
+        return upsampling_output, resnet_output
+
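# Editor's note (not part of the library source): the time-axis arithmetic behind
# the example shapes in the class docstring above. With upsample_scales=[4, 4, 16]
# the total scale is 4 * 4 * 16 = 256. For an input spectrogram with n_time = 10
# and kernel_size = 5:
#
#   MelResNet branch:   (10 - 5 + 1) * 256 = 1536
#   Upsampling branch:  10 * 256 - 2 * indent = 2560 - 2 * 512 = 1536,
#                       where indent = (5 - 1) // 2 * 256 = 512
#
# which matches the (10, 1536, 128) output shapes shown in the Examples section.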
+
+
[docs]class WaveRNN(nn.Module): + r"""WaveRNN model based on the implementation from `fatchord <https://github.com/fatchord/WaveRNN>`_. + + The original implementation was introduced in *Efficient Neural Audio Synthesis* + [:footcite:`kalchbrenner2018efficient`]. The input channels of waveform and spectrogram have to be 1. + The product of `upsample_scales` must equal `hop_length`. + + Args: + upsample_scales: the list of upsample scales. + n_classes: the number of output classes. + hop_length: the number of samples between the starts of consecutive frames. + n_res_block: the number of ResBlock in stack. (Default: ``10``) + n_rnn: the dimension of RNN layer. (Default: ``512``) + n_fc: the dimension of fully connected layer. (Default: ``512``) + kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``) + n_freq: the number of bins in a spectrogram. (Default: ``128``) + n_hidden: the number of hidden dimensions of resblock. (Default: ``128``) + n_output: the number of output dimensions of melresnet. (Default: ``128``) + + Example + >>> wavernn = WaveRNN(upsample_scales=[5,5,8], n_classes=512, hop_length=200) + >>> waveform, sample_rate = torchaudio.load(file) + >>> # waveform shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length) + >>> specgram = MelSpectrogram(sample_rate)(waveform) # shape: (n_batch, n_channel, n_freq, n_time) + >>> output = wavernn(waveform, specgram) + >>> # output shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length, n_classes) + """ + + def __init__(self, + upsample_scales: List[int], + n_classes: int, + hop_length: int, + n_res_block: int = 10, + n_rnn: int = 512, + n_fc: int = 512, + kernel_size: int = 5, + n_freq: int = 128, + n_hidden: int = 128, + n_output: int = 128) -> None: + super().__init__() + + self.kernel_size = kernel_size + self.n_rnn = n_rnn + self.n_aux = n_output // 4 + self.hop_length = hop_length + self.n_classes = n_classes + + total_scale = 1 + for upsample_scale in upsample_scales: + total_scale *= upsample_scale + if total_scale != self.hop_length: + raise ValueError(f"Expected: total_scale == hop_length, but found {total_scale} != {hop_length}") + + self.upsample = UpsampleNetwork(upsample_scales, + n_res_block, + n_freq, + n_hidden, + n_output, + kernel_size) + self.fc = nn.Linear(n_freq + self.n_aux + 1, n_rnn) + + self.rnn1 = nn.GRU(n_rnn, n_rnn, batch_first=True) + self.rnn2 = nn.GRU(n_rnn + self.n_aux, n_rnn, batch_first=True) + + self.relu1 = nn.ReLU(inplace=True) + self.relu2 = nn.ReLU(inplace=True) + + self.fc1 = nn.Linear(n_rnn + self.n_aux, n_fc) + self.fc2 = nn.Linear(n_fc + self.n_aux, n_fc) + self.fc3 = nn.Linear(n_fc, self.n_classes) + +
[docs] def forward(self, waveform: Tensor, specgram: Tensor) -> Tensor: + r"""Pass the input through the WaveRNN model. + + Args: + waveform: the input waveform to the WaveRNN layer (n_batch, 1, (n_time - kernel_size + 1) * hop_length) + specgram: the input spectrogram to the WaveRNN layer (n_batch, 1, n_freq, n_time) + + Return: + Tensor shape: (n_batch, 1, (n_time - kernel_size + 1) * hop_length, n_classes) + """ + + assert waveform.size(1) == 1, 'Require the input channel of waveform is 1' + assert specgram.size(1) == 1, 'Require the input channel of specgram is 1' + # remove channel dimension until the end + waveform, specgram = waveform.squeeze(1), specgram.squeeze(1) + + batch_size = waveform.size(0) + h1 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device) + h2 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device) + # output of upsample: + # specgram: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale) + # aux: (n_batch, n_output, (n_time - kernel_size + 1) * total_scale) + specgram, aux = self.upsample(specgram) + specgram = specgram.transpose(1, 2) + aux = aux.transpose(1, 2) + + aux_idx = [self.n_aux * i for i in range(5)] + a1 = aux[:, :, aux_idx[0]:aux_idx[1]] + a2 = aux[:, :, aux_idx[1]:aux_idx[2]] + a3 = aux[:, :, aux_idx[2]:aux_idx[3]] + a4 = aux[:, :, aux_idx[3]:aux_idx[4]] + + x = torch.cat([waveform.unsqueeze(-1), specgram, a1], dim=-1) + x = self.fc(x) + res = x + x, _ = self.rnn1(x, h1) + + x = x + res + res = x + x = torch.cat([x, a2], dim=-1) + x, _ = self.rnn2(x, h2) + + x = x + res + x = torch.cat([x, a3], dim=-1) + x = self.fc1(x) + x = self.relu1(x) + + x = torch.cat([x, a4], dim=-1) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + # bring back channel dimension + return x.unsqueeze(1)
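# Editor's sketch (not part of the library source): an end-to-end shape check for
# the forward pass above. The hop length must equal the product of
# ``upsample_scales``; all tensor values are random and purely illustrative.
#
#   import torch
#
#   wavernn = WaveRNN(upsample_scales=[5, 5, 8], n_classes=512, hop_length=200)
#   specgram = torch.rand(3, 1, 128, 10)              # (n_batch, 1, n_freq, n_time)
#   waveform = torch.rand(3, 1, (10 - 5 + 1) * 200)   # (n_batch, 1, (n_time - kernel_size + 1) * hop_length)
#   output = wavernn(waveform, specgram)
#   # output.shape == torch.Size([3, 1, 1200, 512])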
+
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/sox_effects/sox_effects.html b/0.9.0/_modules/torchaudio/sox_effects/sox_effects.html new file mode 100644 index 0000000000..41030ce269 --- /dev/null +++ b/0.9.0/_modules/torchaudio/sox_effects/sox_effects.html @@ -0,0 +1,896 @@ + + + + + + + + + + + + torchaudio.sox_effects.sox_effects — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchaudio.sox_effects.sox_effects

+import os
+from typing import List, Tuple, Optional
+
+import torch
+
+import torchaudio
+from torchaudio._internal import module_utils as _mod_utils
+from torchaudio.utils.sox_utils import list_effects
+
+
+
[docs]@_mod_utils.requires_sox() +def init_sox_effects(): + """Initialize resources required to use sox effects. + + Note: + You do not need to call this function manually. It is called automatically. + + Once initialized, you do not need to call this function again across the multiple uses of + sox effects though it is safe to do so as long as :func:`shutdown_sox_effects` is not called yet. + Once :func:`shutdown_sox_effects` is called, you can no longer use SoX effects and initializing + again will result in error. + """ + torch.ops.torchaudio.sox_effects_initialize_sox_effects()
+ + +
[docs]@_mod_utils.requires_sox() +def shutdown_sox_effects(): + """Clean up resources required to use sox effects. + + Note: + You do not need to call this function manually. It is called automatically. + + It is safe to call this function multiple times. + Once :py:func:`shutdown_sox_effects` is called, you can no longer use SoX effects and + initializing again will result in error. + """ + torch.ops.torchaudio.sox_effects_shutdown_sox_effects()
+ + +
[docs]@_mod_utils.requires_sox() +def effect_names() -> List[str]: + """Gets list of valid sox effect names + + Returns: + List[str]: list of available effect names. + + Example + >>> torchaudio.sox_effects.effect_names() + ['allpass', 'band', 'bandpass', ... ] + """ + return list(list_effects().keys())
+ + +
[docs]@_mod_utils.requires_sox() +def apply_effects_tensor( + tensor: torch.Tensor, + sample_rate: int, + effects: List[List[str]], + channels_first: bool = True, +) -> Tuple[torch.Tensor, int]: + """Apply sox effects to given Tensor + + Note: + This function only works on CPU Tensors. + This function works in the way very similar to ``sox`` command, however there are slight + differences. For example, ``sox`` command adds certain effects automatically (such as + ``rate`` effect after ``speed`` and ``pitch`` and other effects), but this function does + only applies the given effects. (Therefore, to actually apply ``speed`` effect, you also + need to give ``rate`` effect with desired sampling rate.). + + Args: + tensor (torch.Tensor): Input 2D CPU Tensor. + sample_rate (int): Sample rate + effects (List[List[str]]): List of effects. + channels_first (bool): Indicates if the input Tensor's dimension is + ``[channels, time]`` or ``[time, channels]`` + + Returns: + Tuple[torch.Tensor, int]: Resulting Tensor and sample rate. + The resulting Tensor has the same ``dtype`` as the input Tensor, and + the same channels order. The shape of the Tensor can be different based on the + effects applied. Sample rate can also be different based on the effects applied. + + Example - Basic usage + >>> + >>> # Defines the effects to apply + >>> effects = [ + ... ['gain', '-n'], # normalises to 0dB + ... ['pitch', '5'], # 5 cent pitch shift + ... ['rate', '8000'], # resample to 8000 Hz + ... ] + >>> + >>> # Generate pseudo wave: + >>> # normalized, channels first, 2ch, sampling rate 16000, 1 second + >>> sample_rate = 16000 + >>> waveform = 2 * torch.rand([2, sample_rate * 1]) - 1 + >>> waveform.shape + torch.Size([2, 16000]) + >>> waveform + tensor([[ 0.3138, 0.7620, -0.9019, ..., -0.7495, -0.4935, 0.5442], + [-0.0832, 0.0061, 0.8233, ..., -0.5176, -0.9140, -0.2434]]) + >>> + >>> # Apply effects + >>> waveform, sample_rate = apply_effects_tensor( + ... wave_form, sample_rate, effects, channels_first=True) + >>> + >>> # Check the result + >>> # The new waveform is sampling rate 8000, 1 second. + >>> # normalization and channel order are preserved + >>> waveform.shape + torch.Size([2, 8000]) + >>> waveform + tensor([[ 0.5054, -0.5518, -0.4800, ..., -0.0076, 0.0096, -0.0110], + [ 0.1331, 0.0436, -0.3783, ..., -0.0035, 0.0012, 0.0008]]) + >>> sample_rate + 8000 + + Example - Torchscript-able transform + >>> + >>> # Use `apply_effects_tensor` in `torch.nn.Module` and dump it to file, + >>> # then run sox effect via Torchscript runtime. + >>> + >>> class SoxEffectTransform(torch.nn.Module): + ... effects: List[List[str]] + ... + ... def __init__(self, effects: List[List[str]]): + ... super().__init__() + ... self.effects = effects + ... + ... def forward(self, tensor: torch.Tensor, sample_rate: int): + ... return sox_effects.apply_effects_tensor( + ... tensor, sample_rate, self.effects) + ... + ... + >>> # Create transform object + >>> effects = [ + ... ["lowpass", "-1", "300"], # apply single-pole lowpass filter + ... ["rate", "8000"], # change sample rate to 8000 + ... 
] + >>> transform = SoxEffectTransform(effects) + >>> + >>> # Dump it to file and load + >>> path = 'sox_effect.zip' + >>> torch.jit.script(transform).save(path) + >>> transform = torch.jit.load(path) + >>> + >>> # Run transform + >>> waveform, input_sample_rate = torchaudio.load("input.wav") + >>> waveform, sample_rate = transform(waveform, input_sample_rate) + >>> assert sample_rate == 8000 + """ + return torch.ops.torchaudio.sox_effects_apply_effects_tensor( + tensor, sample_rate, effects, channels_first)
+ + +
[docs]@_mod_utils.requires_sox() +def apply_effects_file( + path: str, + effects: List[List[str]], + normalize: bool = True, + channels_first: bool = True, + format: Optional[str] = None, +) -> Tuple[torch.Tensor, int]: + """Apply sox effects to the audio file and load the resulting data as Tensor + + Note: + This function works in the way very similar to ``sox`` command, however there are slight + differences. For example, ``sox`` commnad adds certain effects automatically (such as + ``rate`` effect after ``speed``, ``pitch`` etc), but this function only applies the given + effects. Therefore, to actually apply ``speed`` effect, you also need to give ``rate`` + effect with desired sampling rate, because internally, ``speed`` effects only alter sampling + rate and leave samples untouched. + + Args: + path (path-like object or file-like object): + Source of audio data. When the function is not compiled by TorchScript, + (e.g. ``torch.jit.script``), the following types are accepted: + + * ``path-like``: file path + * ``file-like``: Object with ``read(size: int) -> bytes`` method, + which returns byte string of at most ``size`` length. + + When the function is compiled by TorchScript, only ``str`` type is allowed. + + Note: This argument is intentionally annotated as ``str`` only for + TorchScript compiler compatibility. + effects (List[List[str]]): List of effects. + normalize (bool): + When ``True``, this function always return ``float32``, and sample values are + normalized to ``[-1.0, 1.0]``. + If input file is integer WAV, giving ``False`` will change the resulting Tensor type to + integer type. This argument has no effect for formats other + than integer WAV type. + channels_first (bool): When True, the returned Tensor has dimension ``[channel, time]``. + Otherwise, the returned Tensor's dimension is ``[time, channel]``. + format (str, optional): + Override the format detection with the given format. + Providing the argument might help when libsox can not infer the format + from header or extension, + + Returns: + Tuple[torch.Tensor, int]: Resulting Tensor and sample rate. + If ``normalize=True``, the resulting Tensor is always ``float32`` type. + If ``normalize=False`` and the input audio file is of integer WAV file, then the + resulting Tensor has corresponding integer type. (Note 24 bit integer type is not supported) + If ``channels_first=True``, the resulting Tensor has dimension ``[channel, time]``, + otherwise ``[time, channel]``. + + Example - Basic usage + >>> + >>> # Defines the effects to apply + >>> effects = [ + ... ['gain', '-n'], # normalises to 0dB + ... ['pitch', '5'], # 5 cent pitch shift + ... ['rate', '8000'], # resample to 8000 Hz + ... ] + >>> + >>> # Apply effects and load data with channels_first=True + >>> waveform, sample_rate = apply_effects_file("data.wav", effects, channels_first=True) + >>> + >>> # Check the result + >>> waveform.shape + torch.Size([2, 8000]) + >>> waveform + tensor([[ 5.1151e-03, 1.8073e-02, 2.2188e-02, ..., 1.0431e-07, + -1.4761e-07, 1.8114e-07], + [-2.6924e-03, 2.1860e-03, 1.0650e-02, ..., 6.4122e-07, + -5.6159e-07, 4.8103e-07]]) + >>> sample_rate + 8000 + + Example - Apply random speed perturbation to dataset + >>> + >>> # Load data from file, apply random speed perturbation + >>> class RandomPerturbationFile(torch.utils.data.Dataset): + ... \"\"\"Given flist, apply random speed perturbation + ... + ... Suppose all the input files are at least one second long. + ... \"\"\" + ... 
def __init__(self, flist: List[str], sample_rate: int): + ... super().__init__() + ... self.flist = flist + ... self.sample_rate = sample_rate + ... + ... def __getitem__(self, index): + ... speed = 0.5 + 1.5 * random.randn() + ... effects = [ + ... ['gain', '-n', '-10'], # apply 10 db attenuation + ... ['remix', '-'], # merge all the channels + ... ['speed', f'{speed:.5f}'], # duration is now 0.5 ~ 2.0 seconds. + ... ['rate', f'{self.sample_rate}'], + ... ['pad', '0', '1.5'], # add 1.5 seconds silence at the end + ... ['trim', '0', '2'], # get the first 2 seconds + ... ] + ... waveform, _ = torchaudio.sox_effects.apply_effects_file( + ... self.flist[index], effects) + ... return waveform + ... + ... def __len__(self): + ... return len(self.flist) + ... + >>> dataset = RandomPerturbationFile(file_list, sample_rate=8000) + >>> loader = torch.utils.data.DataLoader(dataset, batch_size=32) + >>> for batch in loader: + >>> pass + """ + if not torch.jit.is_scripting(): + if hasattr(path, 'read'): + return torchaudio._torchaudio.apply_effects_fileobj( + path, effects, normalize, channels_first, format) + path = os.fspath(path) + return torch.ops.torchaudio.sox_effects_apply_effects_file( + path, effects, normalize, channels_first, format)
+
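# Editor's sketch (not part of the library source): the docstring above notes that
# ``path`` may also be a file-like object. A minimal in-memory example is shown
# below; the input file and the effect chain are placeholders.
#
#   import io
#   import torchaudio
#
#   with open("input.wav", "rb") as f:
#       buffer = io.BytesIO(f.read())
#   effects = [["gain", "-n"], ["rate", "8000"]]
#   waveform, sample_rate = torchaudio.sox_effects.apply_effects_file(
#       buffer, effects, format="wav")
#   # sample_rate == 8000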
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/transforms.html b/0.9.0/_modules/torchaudio/transforms.html new file mode 100644 index 0000000000..4d8bd05906 --- /dev/null +++ b/0.9.0/_modules/torchaudio/transforms.html @@ -0,0 +1,1833 @@ + + + + + + + + + + + + torchaudio.transforms — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchaudio.transforms

+# -*- coding: utf-8 -*-
+
+import math
+import warnings
+from typing import Callable, Optional
+
+import torch
+from torch import Tensor
+from torchaudio import functional as F
+
+from .functional.functional import (
+    _get_sinc_resample_kernel,
+    _apply_sinc_resample_kernel,
+)
+
+__all__ = [
+    'Spectrogram',
+    'GriffinLim',
+    'AmplitudeToDB',
+    'MelScale',
+    'InverseMelScale',
+    'MelSpectrogram',
+    'MFCC',
+    'MuLawEncoding',
+    'MuLawDecoding',
+    'Resample',
+    'ComplexNorm',
+    'TimeStretch',
+    'Fade',
+    'FrequencyMasking',
+    'TimeMasking',
+    'SlidingWindowCmn',
+    'Vad',
+    'SpectralCentroid',
+    'Vol',
+    'ComputeDeltas',
+]
+
+
+
[docs]class Spectrogram(torch.nn.Module): + r"""Create a spectrogram from a audio signal. + + Args: + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + pad (int, optional): Two sided padding of signal. (Default: ``0``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + power (float or None, optional): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. + If None, then the complex spectrum is returned instead. (Default: ``2``) + normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``) + wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``) + center (bool, optional): whether to pad :attr:`waveform` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. (Default: ``"reflect"``) + onesided (bool, optional): controls whether to return half of results to + avoid redundancy (Default: ``True``) + return_complex (bool, optional): + Indicates whether the resulting complex-valued Tensor should be represented with + native complex dtype, such as `torch.cfloat` and `torch.cdouble`, or real dtype + mimicking complex value with an extra dimension for real and imaginary parts. + This argument is only effective when ``power=None``. + See also ``torch.view_as_real``. + """ + __constants__ = ['n_fft', 'win_length', 'hop_length', 'pad', 'power', 'normalized'] + + def __init__(self, + n_fft: int = 400, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + pad: int = 0, + window_fn: Callable[..., Tensor] = torch.hann_window, + power: Optional[float] = 2., + normalized: bool = False, + wkwargs: Optional[dict] = None, + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, + return_complex: bool = False) -> None: + super(Spectrogram, self).__init__() + self.n_fft = n_fft + # number of FFT bins. the returned STFT result will have n_fft // 2 + 1 + # number of frequencies due to onesided=True in torch.stft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer('window', window) + self.pad = pad + self.power = power + self.normalized = normalized + self.center = center + self.pad_mode = pad_mode + self.onesided = onesided + self.return_complex = return_complex + +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Dimension (..., freq, time), where freq is + ``n_fft // 2 + 1`` where ``n_fft`` is the number of + Fourier bins, and time is the number of window hops (n_frame). + """ + return F.spectrogram( + waveform, + self.pad, + self.window, + self.n_fft, + self.hop_length, + self.win_length, + self.power, + self.normalized, + self.center, + self.pad_mode, + self.onesided, + self.return_complex, + )
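# Editor's sketch (not part of the library source): basic usage of the Spectrogram
# transform above with a random one-second, 16 kHz waveform and default parameters.
#
#   import torch
#
#   transform = Spectrogram(n_fft=400, hop_length=200)
#   waveform = torch.randn(1, 16000)      # (..., time)
#   spec = transform(waveform)            # (..., n_fft // 2 + 1, n_frames)
#   # spec.shape == torch.Size([1, 201, 81])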
+ + +
[docs]class GriffinLim(torch.nn.Module): + r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation. + + Implementation ported from + *librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`] + and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`]. + + Args: + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + n_iter (int, optional): Number of iteration for phase recovery process. (Default: ``32``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + power (float, optional): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``) + wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``) + momentum (float, optional): The momentum parameter for fast Griffin-Lim. + Setting this to 0 recovers the original Griffin-Lim method. + Values near 1 can lead to faster convergence, but above 1 may not converge. (Default: ``0.99``) + length (int, optional): Array length of the expected output. (Default: ``None``) + rand_init (bool, optional): Initializes phase randomly if True and to zero otherwise. (Default: ``True``) + """ + __constants__ = ['n_fft', 'n_iter', 'win_length', 'hop_length', 'power', + 'length', 'momentum', 'rand_init'] + + def __init__(self, + n_fft: int = 400, + n_iter: int = 32, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + window_fn: Callable[..., Tensor] = torch.hann_window, + power: float = 2., + wkwargs: Optional[dict] = None, + momentum: float = 0.99, + length: Optional[int] = None, + rand_init: bool = True) -> None: + super(GriffinLim, self).__init__() + + assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum) + assert momentum >= 0, 'momentum={} < 0'.format(momentum) + + self.n_fft = n_fft + self.n_iter = n_iter + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer('window', window) + self.length = length + self.power = power + self.momentum = momentum / (1 + momentum) + self.rand_init = rand_init + +
[docs] def forward(self, specgram: Tensor) -> Tensor: + r""" + Args: + specgram (Tensor): + A magnitude-only STFT spectrogram of dimension (..., freq, frames) + where freq is ``n_fft // 2 + 1``. + + Returns: + Tensor: waveform of (..., time), where time equals the ``length`` parameter if given. + """ + return F.griffinlim(specgram, self.window, self.n_fft, self.hop_length, self.win_length, self.power, + self.n_iter, self.momentum, self.length, self.rand_init)
+ + +
[docs]class AmplitudeToDB(torch.nn.Module): + r"""Turn a tensor from the power/amplitude scale to the decibel scale. + + This output depends on the maximum value in the input tensor, and so + may return different values for an audio clip split into snippets vs. a + a full clip. + + Args: + stype (str, optional): scale of input tensor ('power' or 'magnitude'). The + power being the elementwise square of the magnitude. (Default: ``'power'``) + top_db (float, optional): minimum negative cut-off in decibels. A reasonable number + is 80. (Default: ``None``) + """ + __constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier'] + + def __init__(self, stype: str = 'power', top_db: Optional[float] = None) -> None: + super(AmplitudeToDB, self).__init__() + self.stype = stype + if top_db is not None and top_db < 0: + raise ValueError('top_db must be positive value') + self.top_db = top_db + self.multiplier = 10.0 if stype == 'power' else 20.0 + self.amin = 1e-10 + self.ref_value = 1.0 + self.db_multiplier = math.log10(max(self.amin, self.ref_value)) + +
[docs] def forward(self, x: Tensor) -> Tensor: + r"""Numerically stable implementation from Librosa. + + https://librosa.org/doc/latest/generated/librosa.amplitude_to_db.html + + Args: + x (Tensor): Input tensor before being converted to decibel scale. + + Returns: + Tensor: Output tensor in decibel scale. + """ + return F.amplitude_to_DB(x, self.multiplier, self.amin, self.db_multiplier, self.top_db)
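# Editor's sketch (not part of the library source): converting a power spectrogram
# to the decibel scale with an 80 dB dynamic-range cut-off.
#
#   import torch
#
#   spec = Spectrogram(n_fft=400, power=2.)(torch.randn(1, 16000))
#   spec_db = AmplitudeToDB(stype="power", top_db=80.)(spec)
#   # spec_db has the same shape as spec; values are clipped to at most 80 dB
#   # below the maximum value of the input.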
+ + +
[docs]class MelScale(torch.nn.Module): + r"""Turn a normal STFT into a mel frequency STFT, using a conversion + matrix. This uses triangular filter banks. + + User can control which device the filter bank (`fb`) is (e.g. fb.to(spec_f.device)). + + Args: + n_mels (int, optional): Number of mel filterbanks. (Default: ``128``) + sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``) + f_min (float, optional): Minimum frequency. (Default: ``0.``) + f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``) + n_stft (int, optional): Number of bins in STFT. Calculated from first input + if None is given. See ``n_fft`` in :class:`Spectrogram`. (Default: ``None``) + norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band + (area normalization). (Default: ``None``) + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + """ + __constants__ = ['n_mels', 'sample_rate', 'f_min', 'f_max'] + + def __init__(self, + n_mels: int = 128, + sample_rate: int = 16000, + f_min: float = 0., + f_max: Optional[float] = None, + n_stft: Optional[int] = None, + norm: Optional[str] = None, + mel_scale: str = "htk") -> None: + super(MelScale, self).__init__() + self.n_mels = n_mels + self.sample_rate = sample_rate + self.f_max = f_max if f_max is not None else float(sample_rate // 2) + self.f_min = f_min + self.norm = norm + self.mel_scale = mel_scale + + assert f_min <= self.f_max, 'Require f_min: {} < f_max: {}'.format(f_min, self.f_max) + + if n_stft is None or n_stft == 0: + warnings.warn( + 'Initialization of torchaudio.transforms.MelScale with an unset weight ' + '`n_stft=None` is deprecated and will be removed in release 0.10. ' + 'Please set a proper `n_stft` value. Typically this is `n_fft // 2 + 1`. ' + 'Refer to https://github.com/pytorch/audio/issues/1510 ' + 'for more details.' + ) + + fb = torch.empty(0) if n_stft is None else F.create_fb_matrix( + n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate, self.norm, + self.mel_scale) + self.register_buffer('fb', fb) + + def __prepare_scriptable__(self): + r"""If `self.fb` is empty, the `forward` method will try to resize the parameter, + which does not work once the transform is scripted. However, this error does not happen + until the transform is executed. This is inconvenient especially if the resulting + TorchScript object is executed in other environments. Therefore, we check the + validity of `self.fb` here and fail if the resulting TS does not work. + + Returns: + MelScale: self + """ + if self.fb.numel() == 0: + raise ValueError("n_stft must be provided at construction") + return self + +
[docs] def forward(self, specgram: Tensor) -> Tensor: + r""" + Args: + specgram (Tensor): A spectrogram STFT of dimension (..., freq, time). + + Returns: + Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time). + """ + + # pack batch + shape = specgram.size() + specgram = specgram.reshape(-1, shape[-2], shape[-1]) + + if self.fb.numel() == 0: + tmp_fb = F.create_fb_matrix(specgram.size(1), self.f_min, self.f_max, + self.n_mels, self.sample_rate, self.norm, + self.mel_scale) + # Attributes cannot be reassigned outside __init__ so workaround + self.fb.resize_(tmp_fb.size()) + self.fb.copy_(tmp_fb) + + # (channel, frequency, time).transpose(...) dot (frequency, n_mels) + # -> (channel, time, n_mels).transpose(...) + mel_specgram = torch.matmul(specgram.transpose(1, 2), self.fb).transpose(1, 2) + + # unpack batch + mel_specgram = mel_specgram.reshape(shape[:-2] + mel_specgram.shape[-2:]) + + return mel_specgram
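# Editor's sketch (not part of the library source): applying MelScale to an STFT
# power spectrogram with ``n_stft`` passed explicitly, as the deprecation warning
# in ``__init__`` above recommends.
#
#   import torch
#
#   n_fft = 400
#   spec = Spectrogram(n_fft=n_fft, power=2.)(torch.randn(1, 16000))    # (1, 201, 81)
#   mel = MelScale(n_mels=64, sample_rate=16000, n_stft=n_fft // 2 + 1)(spec)
#   # mel.shape == torch.Size([1, 64, 81])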
+ + +
[docs]class InverseMelScale(torch.nn.Module): + r"""Solve for a normal STFT from a mel frequency STFT, using a conversion + matrix. This uses triangular filter banks. + + It minimizes the euclidian norm between the input mel-spectrogram and the product between + the estimated spectrogram and the filter banks using SGD. + + Args: + n_stft (int): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`. + n_mels (int, optional): Number of mel filterbanks. (Default: ``128``) + sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``) + f_min (float, optional): Minimum frequency. (Default: ``0.``) + f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``) + max_iter (int, optional): Maximum number of optimization iterations. (Default: ``100000``) + tolerance_loss (float, optional): Value of loss to stop optimization at. (Default: ``1e-5``) + tolerance_change (float, optional): Difference in losses to stop optimization at. (Default: ``1e-8``) + sgdargs (dict or None, optional): Arguments for the SGD optimizer. (Default: ``None``) + norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band + (area normalization). (Default: ``None``) + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + """ + __constants__ = ['n_stft', 'n_mels', 'sample_rate', 'f_min', 'f_max', 'max_iter', 'tolerance_loss', + 'tolerance_change', 'sgdargs'] + + def __init__(self, + n_stft: int, + n_mels: int = 128, + sample_rate: int = 16000, + f_min: float = 0., + f_max: Optional[float] = None, + max_iter: int = 100000, + tolerance_loss: float = 1e-5, + tolerance_change: float = 1e-8, + sgdargs: Optional[dict] = None, + norm: Optional[str] = None, + mel_scale: str = "htk") -> None: + super(InverseMelScale, self).__init__() + self.n_mels = n_mels + self.sample_rate = sample_rate + self.f_max = f_max or float(sample_rate // 2) + self.f_min = f_min + self.max_iter = max_iter + self.tolerance_loss = tolerance_loss + self.tolerance_change = tolerance_change + self.sgdargs = sgdargs or {'lr': 0.1, 'momentum': 0.9} + + assert f_min <= self.f_max, 'Require f_min: {} < f_max: {}'.format(f_min, self.f_max) + + fb = F.create_fb_matrix(n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate, norm, + mel_scale) + self.register_buffer('fb', fb) + +
[docs] def forward(self, melspec: Tensor) -> Tensor: + r""" + Args: + melspec (Tensor): A Mel frequency spectrogram of dimension (..., ``n_mels``, time) + + Returns: + Tensor: Linear scale spectrogram of size (..., freq, time) + """ + # pack batch + shape = melspec.size() + melspec = melspec.view(-1, shape[-2], shape[-1]) + + n_mels, time = shape[-2], shape[-1] + freq, _ = self.fb.size() # (freq, n_mels) + melspec = melspec.transpose(-1, -2) + assert self.n_mels == n_mels + + specgram = torch.rand(melspec.size()[0], time, freq, requires_grad=True, + dtype=melspec.dtype, device=melspec.device) + + optim = torch.optim.SGD([specgram], **self.sgdargs) + + loss = float('inf') + for _ in range(self.max_iter): + optim.zero_grad() + diff = melspec - specgram.matmul(self.fb) + new_loss = diff.pow(2).sum(axis=-1).mean() + # take sum over mel-frequency then average over other dimensions + # so that loss threshold is applied per unit time frame + new_loss.backward() + optim.step() + specgram.data = specgram.data.clamp(min=0) + + new_loss = new_loss.item() + if new_loss < self.tolerance_loss or abs(loss - new_loss) < self.tolerance_change: + break + loss = new_loss + + specgram.requires_grad_(False) + specgram = specgram.clamp(min=0).transpose(-1, -2) + + # unpack batch + specgram = specgram.view(shape[:-2] + (freq, time)) + return specgram
+ + +
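A sketch of round-tripping through ``InverseMelScale`` (editor-added, not from the source). The transform runs an SGD fit, so it is slow and approximate; ``max_iter`` is lowered here only to keep the example quick, and all values are illustrative.

>>> import torch
>>> import torchaudio.transforms as T
>>> n_fft = 512
>>> spec = T.Spectrogram(n_fft=n_fft)(torch.randn(1, 8000))            # (1, 257, frames)
>>> mel = T.MelScale(n_mels=64, sample_rate=8000, n_stft=n_fft // 2 + 1)(spec)
>>> est = T.InverseMelScale(n_stft=n_fft // 2 + 1, n_mels=64, sample_rate=8000,
...                         max_iter=1000)(mel)                        # (1, 257, frames), approximate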
[docs]class MelSpectrogram(torch.nn.Module): + r"""Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram + and MelScale. + + Sources + * https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe + * https://timsainb.github.io/spectrograms-mfccs-and-inversion-in-python.html + * http://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html + + Args: + sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``) + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + f_min (float, optional): Minimum frequency. (Default: ``0.``) + f_max (float or None, optional): Maximum frequency. (Default: ``None``) + pad (int, optional): Two sided padding of signal. (Default: ``0``) + n_mels (int, optional): Number of mel filterbanks. (Default: ``128``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + power (float, optional): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``) + normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``) + wkwargs (Dict[..., ...] or None, optional): Arguments for window function. (Default: ``None``) + center (bool, optional): whether to pad :attr:`waveform` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. (Default: ``"reflect"``) + onesided (bool, optional): controls whether to return half of results to + avoid redundancy. (Default: ``True``) + norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band + (area normalization). (Default: ``None``) + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. 
(Default: ``htk``) + + Example + >>> waveform, sample_rate = torchaudio.load('test.wav', normalization=True) + >>> mel_specgram = transforms.MelSpectrogram(sample_rate)(waveform) # (channel, n_mels, time) + """ + __constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad', 'n_mels', 'f_min'] + + def __init__(self, + sample_rate: int = 16000, + n_fft: int = 400, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + f_min: float = 0., + f_max: Optional[float] = None, + pad: int = 0, + n_mels: int = 128, + window_fn: Callable[..., Tensor] = torch.hann_window, + power: float = 2., + normalized: bool = False, + wkwargs: Optional[dict] = None, + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, + norm: Optional[str] = None, + mel_scale: str = "htk") -> None: + super(MelSpectrogram, self).__init__() + self.sample_rate = sample_rate + self.n_fft = n_fft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + self.pad = pad + self.power = power + self.normalized = normalized + self.n_mels = n_mels # number of mel frequency bins + self.f_max = f_max + self.f_min = f_min + self.spectrogram = Spectrogram(n_fft=self.n_fft, win_length=self.win_length, + hop_length=self.hop_length, + pad=self.pad, window_fn=window_fn, power=self.power, + normalized=self.normalized, wkwargs=wkwargs, + center=center, pad_mode=pad_mode, onesided=onesided) + self.mel_scale = MelScale( + self.n_mels, + self.sample_rate, + self.f_min, + self.f_max, + self.n_fft // 2 + 1, + norm, + mel_scale + ) + +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time). + """ + specgram = self.spectrogram(waveform) + mel_specgram = self.mel_scale(specgram) + return mel_specgram
+ + +
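``MelSpectrogram`` is ``Spectrogram`` followed by ``MelScale``; the sketch below (editor-added, not from the source) checks that against the composed transforms on a synthetic waveform, assuming default window, hop, and mel options on both paths.

>>> import torch
>>> import torchaudio.transforms as T
>>> waveform = torch.randn(1, 16000)
>>> mel_a = T.MelSpectrogram(sample_rate=16000, n_fft=1024, n_mels=80)(waveform)
>>> spec = T.Spectrogram(n_fft=1024, power=2.)(waveform)
>>> mel_b = T.MelScale(n_mels=80, sample_rate=16000, n_stft=1024 // 2 + 1)(spec)
>>> torch.allclose(mel_a, mel_b)
True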
[docs]class MFCC(torch.nn.Module): + r"""Create the Mel-frequency cepstrum coefficients from an audio signal. + + By default, this calculates the MFCC on the DB-scaled Mel spectrogram. + This is not the textbook implementation, but is implemented here to + give consistency with librosa. + + This output depends on the maximum value in the input spectrogram, and so + may return different values for an audio clip split into snippets vs. + a full clip. + + Args: + sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``) + n_mfcc (int, optional): Number of mfc coefficients to retain. (Default: ``40``) + dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``) + norm (str, optional): norm to use. (Default: ``'ortho'``) + log_mels (bool, optional): whether to use log-mel spectrograms instead of db-scaled. (Default: ``False``) + melkwargs (dict or None, optional): arguments for MelSpectrogram. (Default: ``None``) + """ + __constants__ = ['sample_rate', 'n_mfcc', 'dct_type', 'top_db', 'log_mels'] + + def __init__(self, + sample_rate: int = 16000, + n_mfcc: int = 40, + dct_type: int = 2, + norm: str = 'ortho', + log_mels: bool = False, + melkwargs: Optional[dict] = None) -> None: + super(MFCC, self).__init__() + supported_dct_types = [2] + if dct_type not in supported_dct_types: + raise ValueError('DCT type not supported: {}'.format(dct_type)) + self.sample_rate = sample_rate + self.n_mfcc = n_mfcc + self.dct_type = dct_type + self.norm = norm + self.top_db = 80.0 + self.amplitude_to_DB = AmplitudeToDB('power', self.top_db) + + if melkwargs is not None: + self.MelSpectrogram = MelSpectrogram(sample_rate=self.sample_rate, **melkwargs) + else: + self.MelSpectrogram = MelSpectrogram(sample_rate=self.sample_rate) + + if self.n_mfcc > self.MelSpectrogram.n_mels: + raise ValueError('Cannot select more MFCC coefficients than # mel bins') + dct_mat = F.create_dct(self.n_mfcc, self.MelSpectrogram.n_mels, self.norm) + self.register_buffer('dct_mat', dct_mat) + self.log_mels = log_mels +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: specgram_mel_db of size (..., ``n_mfcc``, time). + """ + mel_specgram = self.MelSpectrogram(waveform) + if self.log_mels: + log_offset = 1e-6 + mel_specgram = torch.log(mel_specgram + log_offset) + else: + mel_specgram = self.amplitude_to_DB(mel_specgram) + + # (..., channel, n_mels, time).transpose(...) dot (n_mels, n_mfcc) + # -> (..., channel, time, n_mfcc).transpose(...) + mfcc = torch.matmul(mel_specgram.transpose(-2, -1), self.dct_mat).transpose(-2, -1) + return mfcc
+ + +
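A short ``MFCC`` sketch (editor-added, not from the source); ``melkwargs`` is forwarded to ``MelSpectrogram``, so the keys used below are ordinary ``MelSpectrogram`` arguments with illustrative values.

>>> import torch
>>> import torchaudio.transforms as T
>>> waveform = torch.randn(1, 16000)
>>> mfcc = T.MFCC(sample_rate=16000, n_mfcc=13,
...               melkwargs={'n_fft': 400, 'hop_length': 160, 'n_mels': 40})
>>> coeffs = mfcc(waveform)          # (channel, n_mfcc, time) == (1, 13, 101)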
[docs]class MuLawEncoding(torch.nn.Module): + r"""Encode signal based on mu-law companding. For more info see the + `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ + + This algorithm assumes the signal has been scaled to between -1 and 1 and + returns a signal encoded with values from 0 to quantization_channels - 1 + + Args: + quantization_channels (int, optional): Number of channels. (Default: ``256``) + """ + __constants__ = ['quantization_channels'] + + def __init__(self, quantization_channels: int = 256) -> None: + super(MuLawEncoding, self).__init__() + self.quantization_channels = quantization_channels + +
[docs] def forward(self, x: Tensor) -> Tensor: + r""" + Args: + x (Tensor): A signal to be encoded. + + Returns: + x_mu (Tensor): An encoded signal. + """ + return F.mu_law_encoding(x, self.quantization_channels)
+ + +
[docs]class MuLawDecoding(torch.nn.Module): + r"""Decode mu-law encoded signal. For more info see the + `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ + + This expects an input with values between 0 and quantization_channels - 1 + and returns a signal scaled between -1 and 1. + + Args: + quantization_channels (int, optional): Number of channels. (Default: ``256``) + """ + __constants__ = ['quantization_channels'] + + def __init__(self, quantization_channels: int = 256) -> None: + super(MuLawDecoding, self).__init__() + self.quantization_channels = quantization_channels + +
[docs] def forward(self, x_mu: Tensor) -> Tensor: + r""" + Args: + x_mu (Tensor): A mu-law encoded signal which needs to be decoded. + + Returns: + Tensor: The signal decoded. + """ + return F.mu_law_decoding(x_mu, self.quantization_channels)
+ + +
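A mu-law round trip covering the two transforms above (editor-added, not from the source): the input must already lie in [-1, 1], and quantization makes the round trip lossy.

>>> import torch
>>> import torchaudio.transforms as T
>>> waveform = torch.rand(1, 16000) * 2 - 1                            # synthetic signal in [-1, 1]
>>> encoded = T.MuLawEncoding(quantization_channels=256)(waveform)     # integer values in [0, 255]
>>> decoded = T.MuLawDecoding(quantization_channels=256)(encoded)      # back to [-1, 1]
>>> err = (waveform - decoded).abs().max()                             # small quantization error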
[docs]class Resample(torch.nn.Module): + r"""Resample a signal from one frequency to another. A resampling method can be given. + + Note: + If resampling on waveforms of higher precision than float32, there may be a small loss of precision + because the kernel is cached once as float32. If high precision resampling is important for your application, + the functional form will retain higher precision, but run slower because it does not cache the kernel. + Alternatively, you could rewrite a transform that caches a higher precision kernel. + + Args: + orig_freq (float, optional): The original frequency of the signal. (Default: ``16000``) + new_freq (float, optional): The desired frequency. (Default: ``16000``) + resampling_method (str, optional): The resampling method to use. + Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``) + lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper + but less efficient. (Default: ``6``) + rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist. + Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``) + beta (float or None): The shape parameter used for kaiser window. + dtype (torch.dtype, optional): + Determines the precision that resampling kernel is pre-computed and cached. If not provided, + kernel is computed with ``torch.float64`` then cached as ``torch.float32``. + If you need higher precision, provide ``torch.float64``, and the pre-computed kernel is computed and + cached as ``torch.float64``. If you use resample with lower precision, then instead of + providing this argument, please use ``Resample.to(dtype)``, so that the kernel generation is still + carried out on ``torch.float64``. + """ + + def __init__( + self, + orig_freq: float = 16000, + new_freq: float = 16000, + resampling_method: str = 'sinc_interpolation', + lowpass_filter_width: int = 6, + rolloff: float = 0.99, + beta: Optional[float] = None, + *, + dtype: Optional[torch.dtype] = None, + ) -> None: + super().__init__() + + self.orig_freq = orig_freq + self.new_freq = new_freq + self.gcd = math.gcd(int(self.orig_freq), int(self.new_freq)) + self.resampling_method = resampling_method + self.lowpass_filter_width = lowpass_filter_width + self.rolloff = rolloff + self.beta = beta + + if self.orig_freq != self.new_freq: + kernel, self.width = _get_sinc_resample_kernel( + self.orig_freq, self.new_freq, self.gcd, + self.lowpass_filter_width, self.rolloff, + self.resampling_method, beta, dtype=dtype) + self.register_buffer('kernel', kernel) +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Output signal of dimension (..., time). + """ + if self.orig_freq == self.new_freq: + return waveform + return _apply_sinc_resample_kernel( + waveform, self.orig_freq, self.new_freq, self.gcd, + self.kernel, self.width)
+ + +
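A ``Resample`` sketch (editor-added, not from the source); the kernel is built once at construction for the given frequency pair, so reuse one instance when converting many clips.

>>> import torch
>>> import torchaudio.transforms as T
>>> resampler = T.Resample(orig_freq=44100, new_freq=16000)
>>> waveform = torch.randn(1, 44100)        # one second at 44.1 kHz, synthetic
>>> resampled = resampler(waveform)         # roughly (1, 16000)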
[docs]class ComplexNorm(torch.nn.Module): + r"""Compute the norm of complex tensor input. + + Args: + power (float, optional): Power of the norm. (Default: ``1.0``) + """ + __constants__ = ['power'] + + def __init__(self, power: float = 1.0) -> None: + warnings.warn( + 'torchaudio.transforms.ComplexNorm has been deprecated ' + 'and will be removed in a future release. ' + 'Please convert the input Tensor to complex type with `torch.view_as_complex` then ' + 'use `torch.abs` and `torch.angle`. ' + 'Please refer to https://github.com/pytorch/audio/issues/1337 ' + "for more details about torchaudio's plan to migrate to native complex type." + ) + super(ComplexNorm, self).__init__() + self.power = power +
[docs] def forward(self, complex_tensor: Tensor) -> Tensor: + r""" + Args: + complex_tensor (Tensor): Tensor shape of `(..., complex=2)`. + + Returns: + Tensor: norm of the input tensor, shape of `(..., )`. + """ + return F.complex_norm(complex_tensor, self.power)
+ + +
[docs]class ComputeDeltas(torch.nn.Module): + r"""Compute delta coefficients of a tensor, usually a spectrogram. + + See `torchaudio.functional.compute_deltas` for more details. + + Args: + win_length (int): The window length used for computing delta. (Default: ``5``) + mode (str): Mode parameter passed to padding. (Default: ``'replicate'``) + """ + __constants__ = ['win_length'] + + def __init__(self, win_length: int = 5, mode: str = "replicate") -> None: + super(ComputeDeltas, self).__init__() + self.win_length = win_length + self.mode = mode + +
[docs] def forward(self, specgram: Tensor) -> Tensor: + r""" + Args: + specgram (Tensor): Tensor of audio of dimension (..., freq, time). + + Returns: + Tensor: Tensor of deltas of dimension (..., freq, time). + """ + return F.compute_deltas(specgram, win_length=self.win_length, mode=self.mode)
+ + +
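A ``ComputeDeltas`` sketch (editor-added, not from the source), stacking MFCCs with their first and second order deltas, a common feature layout.

>>> import torch
>>> import torchaudio.transforms as T
>>> mfcc = T.MFCC(sample_rate=16000, n_mfcc=13)(torch.randn(1, 16000))
>>> deltas = T.ComputeDeltas(win_length=5)(mfcc)
>>> deltas2 = T.ComputeDeltas(win_length=5)(deltas)
>>> features = torch.cat([mfcc, deltas, deltas2], dim=-2)   # (channel, 3 * n_mfcc, time)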
[docs]class TimeStretch(torch.nn.Module): + r"""Stretch stft in time without modifying pitch for a given rate. + + Args: + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + n_freq (int, optional): number of filter banks from stft. (Default: ``201``) + fixed_rate (float or None, optional): rate to speed up or slow down by. + If None is provided, rate must be passed to the forward method. (Default: ``None``) + """ + __constants__ = ['fixed_rate'] + + def __init__(self, + hop_length: Optional[int] = None, + n_freq: int = 201, + fixed_rate: Optional[float] = None) -> None: + super(TimeStretch, self).__init__() + + self.fixed_rate = fixed_rate + + n_fft = (n_freq - 1) * 2 + hop_length = hop_length if hop_length is not None else n_fft // 2 + self.register_buffer('phase_advance', torch.linspace(0, math.pi * hop_length, n_freq)[..., None]) + +
[docs] def forward(self, complex_specgrams: Tensor, overriding_rate: Optional[float] = None) -> Tensor: + r""" + Args: + complex_specgrams (Tensor): + Either a real tensor of dimension of ``(..., freq, num_frame, complex=2)`` + or a tensor of dimension ``(..., freq, num_frame)`` with complex dtype. + overriding_rate (float or None, optional): speed up to apply to this batch. + If no rate is passed, use ``self.fixed_rate``. (Default: ``None``) + + Returns: + Tensor: + Stretched spectrogram. The resulting tensor is of the same dtype as the input + spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``. + """ + if overriding_rate is None: + if self.fixed_rate is None: + raise ValueError( + "If no fixed_rate is specified, must pass a valid rate to the forward method.") + rate = self.fixed_rate + else: + rate = overriding_rate + return F.phase_vocoder(complex_specgrams, rate, self.phase_advance)
+ + +
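A ``TimeStretch`` sketch (editor-added, not from the source): the transform operates on a complex STFT, so the spectrogram below is produced with ``torch.stft`` and ``return_complex=True``; ``n_freq`` must match ``n_fft // 2 + 1``, and the rate of 1.2 is illustrative.

>>> import torch
>>> import torchaudio.transforms as T
>>> waveform = torch.randn(1, 16000)
>>> n_fft, hop = 2048, 512
>>> spec = torch.stft(waveform, n_fft=n_fft, hop_length=hop,
...                   window=torch.hann_window(n_fft), return_complex=True)
>>> stretch = T.TimeStretch(hop_length=hop, n_freq=n_fft // 2 + 1, fixed_rate=1.2)
>>> faster = stretch(spec)        # same frequency bins, roughly num_frame / 1.2 frames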
[docs]class Fade(torch.nn.Module): + r"""Add a fade in and/or fade out to a waveform. + + Args: + fade_in_len (int, optional): Length of fade-in (time frames). (Default: ``0``) + fade_out_len (int, optional): Length of fade-out (time frames). (Default: ``0``) + fade_shape (str, optional): Shape of fade. Must be one of: "quarter_sine", + "half_sine", "linear", "logarithmic", "exponential". (Default: ``"linear"``) + """ + + def __init__(self, + fade_in_len: int = 0, + fade_out_len: int = 0, + fade_shape: str = "linear") -> None: + super(Fade, self).__init__() + self.fade_in_len = fade_in_len + self.fade_out_len = fade_out_len + self.fade_shape = fade_shape +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Tensor of audio of dimension (..., time). + """ + waveform_length = waveform.size()[-1] + device = waveform.device + return self._fade_in(waveform_length).to(device) * \ + self._fade_out(waveform_length).to(device) * waveform
+ + def _fade_in(self, waveform_length: int) -> Tensor: + fade = torch.linspace(0, 1, self.fade_in_len) + ones = torch.ones(waveform_length - self.fade_in_len) + + if self.fade_shape == "linear": + fade = fade + + if self.fade_shape == "exponential": + fade = torch.pow(2, (fade - 1)) * fade + + if self.fade_shape == "logarithmic": + fade = torch.log10(.1 + fade) + 1 + + if self.fade_shape == "quarter_sine": + fade = torch.sin(fade * math.pi / 2) + + if self.fade_shape == "half_sine": + fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5 + + return torch.cat((fade, ones)).clamp_(0, 1) + + def _fade_out(self, waveform_length: int) -> Tensor: + fade = torch.linspace(0, 1, self.fade_out_len) + ones = torch.ones(waveform_length - self.fade_out_len) + + if self.fade_shape == "linear": + fade = - fade + 1 + + if self.fade_shape == "exponential": + fade = torch.pow(2, - fade) * (1 - fade) + + if self.fade_shape == "logarithmic": + fade = torch.log10(1.1 - fade) + 1 + + if self.fade_shape == "quarter_sine": + fade = torch.sin(fade * math.pi / 2 + math.pi / 2) + + if self.fade_shape == "half_sine": + fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5 + + return torch.cat((ones, fade)).clamp_(0, 1)
+ + +class _AxisMasking(torch.nn.Module): + r"""Apply masking to a spectrogram. + + Args: + mask_param (int): Maximum possible length of the mask. + axis (int): What dimension the mask is applied on. + iid_masks (bool): Applies iid masks to each of the examples in the batch dimension. + This option is applicable only when the input tensor is 4D. + """ + __constants__ = ['mask_param', 'axis', 'iid_masks'] + + def __init__(self, mask_param: int, axis: int, iid_masks: bool) -> None: + + super(_AxisMasking, self).__init__() + self.mask_param = mask_param + self.axis = axis + self.iid_masks = iid_masks + + def forward(self, specgram: Tensor, mask_value: float = 0.) -> Tensor: + r""" + Args: + specgram (Tensor): Tensor of dimension (..., freq, time). + mask_value (float): Value to assign to the masked columns. + + Returns: + Tensor: Masked spectrogram of dimensions (..., freq, time). + """ + # if iid_masks flag marked and specgram has a batch dimension + if self.iid_masks and specgram.dim() == 4: + return F.mask_along_axis_iid(specgram, self.mask_param, mask_value, self.axis + 1) + else: + return F.mask_along_axis(specgram, self.mask_param, mask_value, self.axis) + + +
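A short sketch for the ``Fade`` transform defined above (editor-added, not from the source); lengths are in samples and are illustrative.

>>> import torch
>>> import torchaudio.transforms as T
>>> waveform = torch.randn(1, 16000)
>>> faded = T.Fade(fade_in_len=4000, fade_out_len=4000, fade_shape='half_sine')(waveform)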
[docs]class FrequencyMasking(_AxisMasking): + r"""Apply masking to a spectrogram in the frequency domain. + + Args: + freq_mask_param (int): maximum possible length of the mask. + Indices uniformly sampled from [0, freq_mask_param). + iid_masks (bool, optional): whether to apply different masks to each + example/channel in the batch. (Default: ``False``) + This option is applicable only when the input tensor is 4D. + """ + + def __init__(self, freq_mask_param: int, iid_masks: bool = False) -> None: + super(FrequencyMasking, self).__init__(freq_mask_param, 1, iid_masks)
+ + +
[docs]class TimeMasking(_AxisMasking): + r"""Apply masking to a spectrogram in the time domain. + + Args: + time_mask_param (int): maximum possible length of the mask. + Indices uniformly sampled from [0, time_mask_param). + iid_masks (bool, optional): whether to apply different masks to each + example/channel in the batch. (Default: ``False``) + This option is applicable only when the input tensor is 4D. + """ + + def __init__(self, time_mask_param: int, iid_masks: bool = False) -> None: + super(TimeMasking, self).__init__(time_mask_param, 2, iid_masks)
+ + +
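A SpecAugment-style sketch (editor-added, not from the source) applying frequency and time masks to a mel spectrogram during training; the mask widths are illustrative.

>>> import torch
>>> import torchaudio.transforms as T
>>> mel = T.MelSpectrogram(sample_rate=16000)(torch.randn(1, 16000))   # (1, 128, time)
>>> masked = T.FrequencyMasking(freq_mask_param=30)(mel)
>>> masked = T.TimeMasking(time_mask_param=40)(masked)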
[docs]class Vol(torch.nn.Module): + r"""Adjust the volume of a waveform. + + Args: + gain (float): Interpreted according to the given gain_type: + If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio. + If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared). + If ``gain_type`` = ``db``, ``gain`` is in decibels. + gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``) + """ + + def __init__(self, gain: float, gain_type: str = 'amplitude'): + super(Vol, self).__init__() + self.gain = gain + self.gain_type = gain_type + + if gain_type in ['amplitude', 'power'] and gain < 0: + raise ValueError("If gain_type = amplitude or power, gain must be positive.") +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Tensor of audio of dimension (..., time). + """ + if self.gain_type == "amplitude": + waveform = waveform * self.gain + + if self.gain_type == "db": + waveform = F.gain(waveform, self.gain) + + if self.gain_type == "power": + waveform = F.gain(waveform, 10 * math.log10(self.gain)) + + return torch.clamp(waveform, -1, 1)
+ + +
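A ``Vol`` sketch (editor-added, not from the source): a gain of 6 dB roughly doubles the amplitude, and the output is clamped to [-1, 1], so large gains will clip.

>>> import torch
>>> import torchaudio.transforms as T
>>> waveform = 0.1 * torch.randn(1, 16000)
>>> louder = T.Vol(gain=6.0, gain_type='db')(waveform)
>>> quieter = T.Vol(gain=0.5, gain_type='amplitude')(waveform)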
[docs]class SlidingWindowCmn(torch.nn.Module): + r""" + Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. + + Args: + cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600) + min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start). + Only applicable if center == false, ignored if center==true (int, default = 100) + center (bool, optional): If true, use a window centered on the current frame + (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false) + norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false) + """ + + def __init__(self, + cmn_window: int = 600, + min_cmn_window: int = 100, + center: bool = False, + norm_vars: bool = False) -> None: + super().__init__() + self.cmn_window = cmn_window + self.min_cmn_window = min_cmn_window + self.center = center + self.norm_vars = norm_vars + +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Tensor of audio of dimension (..., time). + """ + cmn_waveform = F.sliding_window_cmn( + waveform, self.cmn_window, self.min_cmn_window, self.center, self.norm_vars) + return cmn_waveform
+ + +
[docs]class Vad(torch.nn.Module): + r"""Voice Activity Detector. Similar to SoX implementation. + Attempts to trim silence and quiet background sounds from the ends of recordings of speech. + The algorithm currently uses a simple cepstral power measurement to detect voice, + so may be fooled by other things, especially music. + + The effect can trim only from the front of the audio, + so in order to trim from the back, the reverse effect must also be used. + + Args: + sample_rate (int): Sample rate of audio signal. + trigger_level (float, optional): The measurement level used to trigger activity detection. + This may need to be changed depending on the noise level, signal level, + and other characteristics of the input audio. (Default: 7.0) + trigger_time (float, optional): The time constant (in seconds) + used to help ignore short bursts of sound. (Default: 0.25) + search_time (float, optional): The amount of audio (in seconds) + to search for quieter/shorter bursts of audio to include prior + to the detected trigger point. (Default: 1.0) + allowed_gap (float, optional): The allowed gap (in seconds) between + quieter/shorter bursts of audio to include prior + to the detected trigger point. (Default: 0.25) + pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve + before the trigger point and any found quieter/shorter bursts. (Default: 0.0) + boot_time (float, optional): The algorithm (internally) uses adaptive noise + estimation/reduction in order to detect the start of the wanted audio. + This option sets the time for the initial noise estimate. (Default: 0.35) + noise_up_time (float, optional): Time constant used by the adaptive noise estimator + for when the noise level is increasing. (Default: 0.1) + noise_down_time (float, optional): Time constant used by the adaptive noise estimator + for when the noise level is decreasing. (Default: 0.01) + noise_reduction_amount (float, optional): Amount of noise reduction to use in + the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35) + measure_freq (float, optional): Frequency of the algorithm's + processing/measurements. (Default: 20.0) + measure_duration (float, optional): Measurement duration. + (Default: Twice the measurement period; i.e. with overlap.) + measure_smooth_time (float, optional): Time constant used to smooth + spectral measurements. (Default: 0.4) + hp_filter_freq (float, optional): "Brick-wall" frequency of high-pass filter applied + at the input to the detector algorithm. (Default: 50.0) + lp_filter_freq (float, optional): "Brick-wall" frequency of low-pass filter applied + at the input to the detector algorithm. (Default: 6000.0) + hp_lifter_freq (float, optional): "Brick-wall" frequency of high-pass lifter used + in the detector algorithm. (Default: 150.0) + lp_lifter_freq (float, optional): "Brick-wall" frequency of low-pass lifter used + in the detector algorithm. (Default: 2000.0) + + Reference: + - http://sox.sourceforge.net/sox.html + """ + + def __init__(self, + sample_rate: int, + trigger_level: float = 7.0, + trigger_time: float = 0.25, + search_time: float = 1.0, + allowed_gap: float = 0.25, + pre_trigger_time: float = 0.0, + boot_time: float = .35, + noise_up_time: float = .1, + noise_down_time: float = .01, + noise_reduction_amount: float = 1.35, + measure_freq: float = 20.0, + measure_duration: Optional[float] = None, + measure_smooth_time: float = .4, + hp_filter_freq: float = 50., + lp_filter_freq: float = 6000., + hp_lifter_freq: float = 150., + lp_lifter_freq: float = 2000.)
-> None: + super().__init__() + + self.sample_rate = sample_rate + self.trigger_level = trigger_level + self.trigger_time = trigger_time + self.search_time = search_time + self.allowed_gap = allowed_gap + self.pre_trigger_time = pre_trigger_time + self.boot_time = boot_time + self.noise_up_time = noise_up_time + self.noise_down_time = noise_down_time + self.noise_reduction_amount = noise_reduction_amount + self.measure_freq = measure_freq + self.measure_duration = measure_duration + self.measure_smooth_time = measure_smooth_time + self.hp_filter_freq = hp_filter_freq + self.lp_filter_freq = lp_filter_freq + self.hp_lifter_freq = hp_lifter_freq + self.lp_lifter_freq = lp_lifter_freq + +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)` + Tensor of shape `(channels, time)` is treated as a multi-channel recording + of the same event and the resulting output will be trimmed to the earliest + voice activity in any channel. + """ + return F.vad( + waveform=waveform, + sample_rate=self.sample_rate, + trigger_level=self.trigger_level, + trigger_time=self.trigger_time, + search_time=self.search_time, + allowed_gap=self.allowed_gap, + pre_trigger_time=self.pre_trigger_time, + boot_time=self.boot_time, + noise_up_time=self.noise_up_time, + noise_down_time=self.noise_down_time, + noise_reduction_amount=self.noise_reduction_amount, + measure_freq=self.measure_freq, + measure_duration=self.measure_duration, + measure_smooth_time=self.measure_smooth_time, + hp_filter_freq=self.hp_filter_freq, + lp_filter_freq=self.lp_filter_freq, + hp_lifter_freq=self.hp_lifter_freq, + lp_lifter_freq=self.lp_lifter_freq, + )
+ + +
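A ``Vad`` sketch (editor-added, not from the source): the transform only trims leading silence, so flipping the waveform, applying it again, and flipping back trims the trailing end as well. The file path 'speech.wav' is hypothetical.

>>> import torch
>>> import torchaudio
>>> import torchaudio.transforms as T
>>> waveform, sample_rate = torchaudio.load('speech.wav')   # hypothetical path
>>> vad = T.Vad(sample_rate=sample_rate, trigger_level=7.0)
>>> trimmed_front = vad(waveform)
>>> trimmed_both = torch.flip(vad(torch.flip(trimmed_front, [-1])), [-1])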
[docs]class SpectralCentroid(torch.nn.Module): + r"""Compute the spectral centroid for each channel along the time axis. + + The spectral centroid is defined as the weighted average of the + frequency values, weighted by their magnitude. + + Args: + sample_rate (int): Sample rate of audio signal. + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + pad (int, optional): Two sided padding of signal. (Default: ``0``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``) + + Example + >>> waveform, sample_rate = torchaudio.load('test.wav', normalization=True) + >>> spectral_centroid = transforms.SpectralCentroid(sample_rate)(waveform) # (channel, time) + """ + __constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad'] + + def __init__(self, + sample_rate: int, + n_fft: int = 400, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + pad: int = 0, + window_fn: Callable[..., Tensor] = torch.hann_window, + wkwargs: Optional[dict] = None) -> None: + super(SpectralCentroid, self).__init__() + self.sample_rate = sample_rate + self.n_fft = n_fft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer('window', window) + self.pad = pad + +
[docs] def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Spectral Centroid of size (..., time). + """ + + return F.spectral_centroid(waveform, self.sample_rate, self.pad, self.window, self.n_fft, self.hop_length, + self.win_length)
+
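A ``SpectralCentroid`` sketch (editor-added, not from the source); the result is one centroid value, in Hz, per STFT frame.

>>> import torch
>>> import torchaudio.transforms as T
>>> waveform = torch.randn(1, 16000)
>>> centroid = T.SpectralCentroid(sample_rate=16000, n_fft=1024)(waveform)   # (channel, time)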
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_modules/torchaudio/utils/sox_utils.html b/0.9.0/_modules/torchaudio/utils/sox_utils.html new file mode 100644 index 0000000000..47126129e7 --- /dev/null +++ b/0.9.0/_modules/torchaudio/utils/sox_utils.html @@ -0,0 +1,715 @@ + + + + + + + + + + + + torchaudio.utils.sox_utils — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for torchaudio.utils.sox_utils

+from typing import List, Dict
+
+import torch
+from torchaudio._internal import module_utils as _mod_utils
+
+
+
[docs]@_mod_utils.requires_sox() +def set_seed(seed: int): + """Set libsox's PRNG + + Args: + seed (int): seed value. valid range is int32. + + See Also: + http://sox.sourceforge.net/sox.html + """ + torch.ops.torchaudio.sox_utils_set_seed(seed)
+ + +
[docs]@_mod_utils.requires_sox() +def set_verbosity(verbosity: int): + """Set libsox's verbosity + + Args: + verbosity (int): Set verbosity level of libsox. + + * ``1`` failure messages + * ``2`` warnings + * ``3`` details of processing + * ``4``-``6`` increasing levels of debug messages + + See Also: + http://sox.sourceforge.net/sox.html + """ + torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
+ + +
[docs]@_mod_utils.requires_sox() +def set_buffer_size(buffer_size: int): + """Set buffer size for sox effect chain + + Args: + buffer_size (int): Set the size in bytes of the buffers used for processing audio. + + See Also: + http://sox.sourceforge.net/sox.html + """ + torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size)
+ + +
[docs]@_mod_utils.requires_sox() +def set_use_threads(use_threads: bool): + """Set multithread option for sox effect chain + + Args: + use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing. + To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support. + + See Also: + http://sox.sourceforge.net/sox.html + """ + torch.ops.torchaudio.sox_utils_set_use_threads(use_threads)
+ + +
[docs]@_mod_utils.requires_sox() +def list_effects() -> Dict[str, str]: + """List the available sox effect names + + Returns: + Dict[str, str]: Mapping from ``effect name`` to ``usage`` + """ + return dict(torch.ops.torchaudio.sox_utils_list_effects())
+ + +
[docs]@_mod_utils.requires_sox() +def list_read_formats() -> List[str]: + """List the supported audio formats for read + + Returns: + List[str]: List of supported audio formats + """ + return torch.ops.torchaudio.sox_utils_list_read_formats()
+ + +
[docs]@_mod_utils.requires_sox() +def list_write_formats() -> List[str]: + """List the supported audio formats for write + + Returns: + List[str]: List of supported audio formats + """ + return torch.ops.torchaudio.sox_utils_list_write_formats()
+
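A short sketch of the configuration utilities above (editor-added, not from the source); these are process-wide settings and require a torchaudio build with sox support, so the exact contents of the returned lists depend on the installed libsox.

>>> from torchaudio.utils import sox_utils
>>> sox_utils.set_verbosity(1)               # only failure messages from libsox
>>> sox_utils.set_buffer_size(16384)         # bytes per processing buffer
>>> effects = sox_utils.list_effects()       # dict: effect name -> usage string
>>> formats = sox_utils.list_read_formats()  # e.g. typically contains 'wav', 'flac', ...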
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/_sources/backend.rst.txt b/0.9.0/_sources/backend.rst.txt new file mode 100644 index 0000000000..6eda225307 --- /dev/null +++ b/0.9.0/_sources/backend.rst.txt @@ -0,0 +1,92 @@ +.. _backend: + +torchaudio.backend +================== + +Overview +~~~~~~~~ + +:mod:`torchaudio.backend` module provides implementations for audio file I/O functionalities, which are ``torchaudio.info``, ``torchaudio.load``, and ``torchaudio.save``. + +There are currently four implementations available. + +* :ref:`"sox_io" ` (default on Linux/macOS) +* :ref:`"soundfile" ` (default on Windows) + +.. note:: + Instead of calling functions in ``torchaudio.backend`` directly, please use ``torchaudio.info``, ``torchaudio.load``, and ``torchaudio.save`` with proper backend set with :func:`torchaudio.set_audio_backend`. + +Availability +------------ + +``"sox_io"`` backend requires C++ extension module, which is included in Linux/macOS binary distributions. This backend is not available on Windows. + +``"soundfile"`` backend requires ``SoundFile``. Please refer to `the SoundFile documentation `_ for the installation. + +Common Data Structure +~~~~~~~~~~~~~~~~~~~~~ + +Structures used to report the metadata of audio files. + +AudioMetaData +------------- + +.. autoclass:: torchaudio.backend.common.AudioMetaData + +.. _sox_io_backend: + +Sox IO Backend +~~~~~~~~~~~~~~ + +The ``"sox_io"`` backend is available and default on Linux/macOS and not available on Windows. + +I/O functions of this backend support `TorchScript `_. + +You can switch from another backend to the ``sox_io`` backend with the following; + +.. code:: + + torchaudio.set_audio_backend("sox_io") + +info +---- + +.. autofunction:: torchaudio.backend.sox_io_backend.info + +load +---- + +.. autofunction:: torchaudio.backend.sox_io_backend.load + +save +---- + +.. autofunction:: torchaudio.backend.sox_io_backend.save + +.. _soundfile_backend: + +Soundfile Backend +~~~~~~~~~~~~~~~~~ + +The ``"soundfile"`` backend is available when `SoundFile `_ is installed. This backend is the default on Windows. + +You can switch from another backend to the ``"soundfile"`` backend with the following; + +.. code:: + + torchaudio.set_audio_backend("soundfile") + +info +---- + +.. autofunction:: torchaudio.backend.soundfile_backend.info + +load +---- + +.. autofunction:: torchaudio.backend.soundfile_backend.load + +save +---- + +.. autofunction:: torchaudio.backend.soundfile_backend.save diff --git a/0.9.0/_sources/compliance.kaldi.rst.txt b/0.9.0/_sources/compliance.kaldi.rst.txt new file mode 100644 index 0000000000..cc75021d69 --- /dev/null +++ b/0.9.0/_sources/compliance.kaldi.rst.txt @@ -0,0 +1,36 @@ +.. role:: hidden + :class: hidden-section + +torchaudio.compliance.kaldi +============================ + +.. currentmodule:: torchaudio.compliance.kaldi + +The useful processing operations of kaldi_ can be performed with torchaudio. +Various functions with identical parameters are given so that torchaudio can +produce similar outputs. + +.. _kaldi: https://github.com/kaldi-asr/kaldi + +Functions +--------- + +:hidden:`spectrogram` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: spectrogram + +:hidden:`fbank` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: fbank + +:hidden:`mfcc` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: mfcc + +:hidden:`resample_waveform` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autofunction:: resample_waveform diff --git a/0.9.0/_sources/datasets.rst.txt b/0.9.0/_sources/datasets.rst.txt new file mode 100644 index 0000000000..2fbb2c7929 --- /dev/null +++ b/0.9.0/_sources/datasets.rst.txt @@ -0,0 +1,113 @@ +torchaudio.datasets +==================== + +All datasets are subclasses of :class:`torch.utils.data.Dataset` +and have ``__getitem__`` and ``__len__`` methods implemented. +Hence, they can all be passed to a :class:`torch.utils.data.DataLoader` +which can load multiple samples parallelly using ``torch.multiprocessing`` workers. +For example: :: + + yesno_data = torchaudio.datasets.YESNO('.', download=True) + data_loader = torch.utils.data.DataLoader(yesno_data, + batch_size=1, + shuffle=True, + num_workers=args.nThreads) + +The following datasets are available: + +.. contents:: Datasets + :local: + +All the datasets have almost similar API. They all have two common arguments: +``transform`` and ``target_transform`` to transform the input and target respectively. + + +.. currentmodule:: torchaudio.datasets + + +CMUARCTIC +~~~~~~~~~ + +.. autoclass:: CMUARCTIC + :members: + :special-members: __getitem__ + + +COMMONVOICE +~~~~~~~~~~~ + +.. autoclass:: COMMONVOICE + :members: + :special-members: __getitem__ + + +GTZAN +~~~~~ + +.. autoclass:: GTZAN + :members: + :special-members: __getitem__ + + +LIBRISPEECH +~~~~~~~~~~~ + +.. autoclass:: LIBRISPEECH + :members: + :special-members: __getitem__ + + +LIBRITTS +~~~~~~~~ + +.. autoclass:: LIBRITTS + :members: + :special-members: __getitem__ + + +LJSPEECH +~~~~~~~~ + +.. autoclass:: LJSPEECH + :members: + :special-members: __getitem__ + + +SPEECHCOMMANDS +~~~~~~~~~~~~~~ + +.. autoclass:: SPEECHCOMMANDS + :members: + :special-members: __getitem__ + + +TEDLIUM +~~~~~~~~~~~~~~ + +.. autoclass:: TEDLIUM + :members: + :special-members: __getitem__ + + +VCTK +~~~~ + +.. autoclass:: VCTK + :members: + :special-members: __getitem__ + + +VCTK_092 +~~~~~~~~ + +.. autoclass:: VCTK_092 + :members: + :special-members: __getitem__ + + +YESNO +~~~~~ + +.. autoclass:: YESNO + :members: + :special-members: __getitem__ diff --git a/0.9.0/_sources/functional.rst.txt b/0.9.0/_sources/functional.rst.txt new file mode 100644 index 0000000000..37593da3fc --- /dev/null +++ b/0.9.0/_sources/functional.rst.txt @@ -0,0 +1,242 @@ +.. role:: hidden + :class: hidden-section + +torchaudio.functional +===================== + +.. currentmodule:: torchaudio.functional + +Functions to perform common audio operations. + +:hidden:`Utility` +~~~~~~~~~~~~~~~~~ + +amplitude_to_DB +--------------- + +.. autofunction:: amplitude_to_DB + +DB_to_amplitude +--------------- + +.. autofunction:: DB_to_amplitude + +create_fb_matrix +---------------- + +.. autofunction:: create_fb_matrix + +create_dct +---------- + +.. autofunction:: create_dct + +mask_along_axis +--------------- + +.. autofunction:: mask_along_axis + +mask_along_axis_iid +------------------- + +.. autofunction:: mask_along_axis_iid + +mu_law_encoding +--------------- + +.. autofunction:: mu_law_encoding + +mu_law_decoding +--------------- + +.. autofunction:: mu_law_decoding + +apply_codec +----------- + +.. autofunction:: apply_codec + +resample +-------- + +.. autofunction:: resample + +:hidden:`Complex Utility` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Utilities for pseudo complex tensor. This is not for the native complex dtype, such as `cfloat64`, but for tensors with real-value type and have extra dimension at the end for real and imaginary parts. + +angle +----- + +.. 
autofunction:: angle + +complex_norm +------------ + +.. autofunction:: complex_norm + + +magphase +-------- + +.. autofunction:: magphase + +:hidden:`Filtering` +~~~~~~~~~~~~~~~~~~~ + + +allpass_biquad +-------------- + +.. autofunction:: allpass_biquad + +band_biquad +----------- + +.. autofunction:: band_biquad + +bandpass_biquad +--------------- + +.. autofunction:: bandpass_biquad + +bandreject_biquad +----------------- + +.. autofunction:: bandreject_biquad + +bass_biquad +----------- + +.. autofunction:: bass_biquad + +biquad +------ + +.. autofunction:: biquad + +contrast +-------- + +.. autofunction:: contrast + +dcshift +------- + +.. autofunction:: dcshift + +deemph_biquad +------------- + +.. autofunction:: deemph_biquad + + +dither +------ + +.. autofunction:: dither + +equalizer_biquad +---------------- + +.. autofunction:: equalizer_biquad + +flanger +------- + +.. autofunction:: flanger + +gain +---- + +.. autofunction:: gain + +highpass_biquad +--------------- + +.. autofunction:: highpass_biquad + +lfilter +------- + +.. autofunction:: lfilter + +lowpass_biquad +-------------- + +.. autofunction:: lowpass_biquad + +overdrive +--------- + +.. autofunction:: overdrive + +phaser +------ + +.. autofunction:: phaser + +riaa_biquad +----------- + +.. autofunction:: riaa_biquad + +treble_biquad +------------- + +.. autofunction:: treble_biquad + + +vad +--- + +:hidden:`Feature Extractions` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: vad + +:hidden:`spectrogram` +--------------------- + +.. autofunction:: spectrogram + +:hidden:`griffinlim` +-------------------- + +.. autofunction:: griffinlim + +:hidden:`phase_vocoder` +----------------------- + +.. autofunction:: phase_vocoder + +:hidden:`compute_deltas` +------------------------ + +.. autofunction:: compute_deltas + +:hidden:`detect_pitch_frequency` +-------------------------------- + +.. autofunction:: detect_pitch_frequency + +:hidden:`sliding_window_cmn` +---------------------------- + +.. autofunction:: sliding_window_cmn + +:hidden:`compute_kaldi_pitch` +----------------------------- + +.. autofunction:: compute_kaldi_pitch + +:hidden:`spectral_centroid` +--------------------------- + +.. autofunction:: spectral_centroid + +References +~~~~~~~~~~ + +.. footbibliography:: diff --git a/0.9.0/_sources/index.rst.txt b/0.9.0/_sources/index.rst.txt new file mode 100644 index 0000000000..0f8dafdc81 --- /dev/null +++ b/0.9.0/_sources/index.rst.txt @@ -0,0 +1,54 @@ +torchaudio +========== +This library is part of the `PyTorch +`_ project. PyTorch is an open source +machine learning framework. + +Features described in this documentation are classified by release status: + + *Stable:* These features will be maintained long-term and there should generally + be no major performance limitations or gaps in documentation. + We also expect to maintain backwards compatibility (although + breaking changes can happen and notice will be given one release ahead + of time). + + *Beta:* Features are tagged as Beta because the API may change based on + user feedback, because the performance needs to improve, or because + coverage across operators is not yet complete. For Beta features, we are + committing to seeing the feature through to the Stable classification. + We are not, however, committing to backwards compatibility. + + *Prototype:* These features are typically not available as part of + binary distributions like PyPI or Conda, except sometimes behind run-time + flags, and are at an early stage for feedback and testing. 
+ + +The :mod:`torchaudio` package consists of I/O, popular datasets and common audio transformations. + +.. toctree:: + :maxdepth: 2 + :caption: Package Reference + + torchaudio + backend + functional + transforms + datasets + models + sox_effects + compliance.kaldi + kaldi_io + utils + + +.. toctree:: + :maxdepth: 1 + :caption: PyTorch Libraries + + PyTorch + torchaudio + torchtext + torchvision + TorchElastic + TorchServe + PyTorch on XLA Devices diff --git a/0.9.0/_sources/kaldi_io.rst.txt b/0.9.0/_sources/kaldi_io.rst.txt new file mode 100644 index 0000000000..2744bcc897 --- /dev/null +++ b/0.9.0/_sources/kaldi_io.rst.txt @@ -0,0 +1,43 @@ +.. role:: hidden + :class: hidden-section + +torchaudio.kaldi_io +====================== + +.. currentmodule:: torchaudio.kaldi_io + +To use this module, the dependency kaldi_io_ needs to be installed. +This is a light wrapper around ``kaldi_io`` that returns :class:`torch.Tensor`. + +.. _kaldi_io: https://github.com/vesis84/kaldi-io-for-python + +Vectors +------- + +:hidden:`read_vec_int_ark` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: read_vec_int_ark + +:hidden:`read_vec_flt_scp` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: read_vec_flt_scp + +:hidden:`read_vec_flt_ark` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: read_vec_flt_ark + +Matrices +-------- + +:hidden:`read_mat_scp` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: read_mat_scp + +:hidden:`read_mat_ark` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: read_mat_ark diff --git a/0.9.0/_sources/models.rst.txt b/0.9.0/_sources/models.rst.txt new file mode 100644 index 0000000000..39e162baa0 --- /dev/null +++ b/0.9.0/_sources/models.rst.txt @@ -0,0 +1,95 @@ +.. role:: hidden + :class: hidden-section + +torchaudio.models +================= + +.. currentmodule:: torchaudio.models + +The models subpackage contains definitions of models for addressing common audio tasks. + + +ConvTasNet +~~~~~~~~~~ + +.. autoclass:: ConvTasNet + + .. automethod:: forward + + +DeepSpeech +~~~~~~~~~~ + +.. autoclass:: DeepSpeech + + .. automethod:: forward + + +Wav2Letter +~~~~~~~~~~ + +.. autoclass:: Wav2Letter + + .. automethod:: forward + + + +Wav2Vec2.0 +~~~~~~~~~~ + +Wav2Vec2Model +------------- + +.. autoclass:: Wav2Vec2Model + + .. automethod:: extract_features + + .. automethod:: forward + +Factory Functions +----------------- + +wav2vec2_base +------------- + +.. autofunction:: wav2vec2_base + +wav2vec2_large +-------------- + +.. autofunction:: wav2vec2_large + +wav2vec2_large_lv60k +-------------------- + +.. autofunction:: wav2vec2_large_lv60k + +.. currentmodule:: torchaudio.models.wav2vec2.utils + +Utility Functions +----------------- + +import_huggingface_model +------------------------ + +.. autofunction:: import_huggingface_model + +import_fairseq_model +-------------------- + +.. autofunction:: import_fairseq_model + +.. currentmodule:: torchaudio.models + +WaveRNN +~~~~~~~ + +.. autoclass:: WaveRNN + + .. automethod:: forward + +References +~~~~~~~~~~ + +.. footbibliography:: + diff --git a/0.9.0/_sources/sox_effects.rst.txt b/0.9.0/_sources/sox_effects.rst.txt new file mode 100644 index 0000000000..6eee11d8c7 --- /dev/null +++ b/0.9.0/_sources/sox_effects.rst.txt @@ -0,0 +1,33 @@ +.. _sox_effects: + +torchaudio.sox_effects +====================== + +.. currentmodule:: torchaudio.sox_effects + +Resource initialization / shutdown +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: init_sox_effects + +.. 
autofunction:: shutdown_sox_effects + +Listing supported effects +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: effect_names + +Applying effects +~~~~~~~~~~~~~~~~ + +Apply SoX effects chain on torch.Tensor or on file and load as torch.Tensor. + +Applying effects on Tensor +-------------------------- + +.. autofunction:: apply_effects_tensor + +Applying effects on file +------------------------ + +.. autofunction:: apply_effects_file diff --git a/0.9.0/_sources/torchaudio.rst.txt b/0.9.0/_sources/torchaudio.rst.txt new file mode 100644 index 0000000000..cb616d01aa --- /dev/null +++ b/0.9.0/_sources/torchaudio.rst.txt @@ -0,0 +1,32 @@ +torchaudio +========== + +I/O functionalities +~~~~~~~~~~~~~~~~~~~ + +Audio I/O functions are implemented in :ref:`torchaudio.backend` module, but for the ease of use, the following functions are made available on :mod:`torchaudio` module. There are different backends available and you can switch backends with :func:`set_audio_backend`. + +Refer to :ref:`backend` for the detail. + +.. function:: torchaudio.info(filepath: str, ...) + + Fetch meta data of an audio file. Refer to :ref:`backend` for the detail. + +.. function:: torchaudio.load(filepath: str, ...) + + Load audio file into torch.Tensor object. Refer to :ref:`backend` for the detail. + +.. function:: torchaudio.save(filepath: str, src: torch.Tensor, sample_rate: int, ...) + + Save torch.Tensor object into an audio format. Refer to :ref:`backend` for the detail. + +.. currentmodule:: torchaudio + +Backend Utilities +~~~~~~~~~~~~~~~~~ + +.. autofunction:: list_audio_backends + +.. autofunction:: get_audio_backend + +.. autofunction:: set_audio_backend diff --git a/0.9.0/_sources/transforms.rst.txt b/0.9.0/_sources/transforms.rst.txt new file mode 100644 index 0000000000..787673f8df --- /dev/null +++ b/0.9.0/_sources/transforms.rst.txt @@ -0,0 +1,157 @@ +.. role:: hidden + :class: hidden-section + +torchaudio.transforms +====================== + +.. currentmodule:: torchaudio.transforms + +Transforms are common audio transforms. They can be chained together using :class:`torch.nn.Sequential` + + +:hidden:`Spectrogram` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Spectrogram + + .. automethod:: forward + +:hidden:`GriffinLim` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: GriffinLim + + .. automethod:: forward + +:hidden:`AmplitudeToDB` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AmplitudeToDB + + .. automethod:: forward + +:hidden:`MelScale` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MelScale + + .. automethod:: forward + +:hidden:`InverseMelScale` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: InverseMelScale + + .. automethod:: forward + + +:hidden:`MelSpectrogram` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MelSpectrogram + + .. automethod:: forward + +:hidden:`MFCC` +~~~~~~~~~~~~~~ + +.. autoclass:: MFCC + + .. automethod:: forward + +:hidden:`MuLawEncoding` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MuLawEncoding + + .. automethod:: forward + +:hidden:`MuLawDecoding` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MuLawDecoding + + .. automethod:: forward + +:hidden:`Resample` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Resample + + .. automethod:: forward + +:hidden:`ComplexNorm` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ComplexNorm + + .. automethod:: forward + +:hidden:`ComputeDeltas` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ComputeDeltas + + .. automethod:: forward + +:hidden:`TimeStretch` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: TimeStretch + + .. 
automethod:: forward + +:hidden:`Fade` +~~~~~~~~~~~~~~ + +.. autoclass:: Fade + + .. automethod:: forward + +:hidden:`FrequencyMasking` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: FrequencyMasking + + .. automethod:: forward + +:hidden:`TimeMasking` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: TimeMasking + + .. automethod:: forward + +:hidden:`Vol` +~~~~~~~~~~~~~ + +.. autoclass:: Vol + + .. automethod:: forward + +:hidden:`SlidingWindowCmn` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: SlidingWindowCmn + + .. automethod:: forward + +:hidden:`SpectralCentroid` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: SpectralCentroid + + .. automethod:: forward + +:hidden:`Vad` +~~~~~~~~~~~~~ + +.. autoclass:: Vad + + .. automethod:: forward + + +References +~~~~~~~~~~ + +.. footbibliography:: diff --git a/0.9.0/_sources/utils.rst.txt b/0.9.0/_sources/utils.rst.txt new file mode 100644 index 0000000000..dc5ad0fd73 --- /dev/null +++ b/0.9.0/_sources/utils.rst.txt @@ -0,0 +1,11 @@ +torchaudio.utils +================ + +torchaudio.utils.sox_utils +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Utility module to configure libsox. +This affects functionalities in :ref:`Sox IO backend` and :ref:`Sox Effects`. + +.. automodule:: torchaudio.utils.sox_utils + :members: diff --git a/0.9.0/_static/basic.css b/0.9.0/_static/basic.css new file mode 100644 index 0000000000..01192852b5 --- /dev/null +++ b/0.9.0/_static/basic.css @@ -0,0 +1,768 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + 
color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a.brackets:before, +span.brackets > a:before{ + content: "["; +} + +a.brackets:after, +span.brackets > a:after { + content: "]"; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + 
+p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > p:first-child, +td > p:first-child { + margin-top: 0px; +} + +th > p:last-child, +td > p:last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist td { + vertical-align: top; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +li > p:first-child { + margin-top: 0px; +} + +li > p:last-child { + margin-bottom: 0px; +} + +dl.footnote > dt, +dl.citation > dt { + float: left; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dt:after { + content: ":"; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > p:first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: 
bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0.5em; + content: ":"; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/0.9.0/_static/css/override.css b/0.9.0/_static/css/override.css new file mode 100644 index 0000000000..a97d8c91ae --- /dev/null +++ b/0.9.0/_static/css/override.css @@ -0,0 +1,8 @@ +/* Fix for bibtex reference */ +dl.footnote.brackets > dt.label > span.brackets > a.fn-backref { + position: inherit +} +/* Fix for bibtex back reference */ +dl.footnote.brackets > dt.label > span.fn-backref > a { + position: inherit +} diff --git a/0.9.0/_static/css/theme.css b/0.9.0/_static/css/theme.css new file mode 100644 index 
0000000000..2010253147 --- /dev/null +++ b/0.9.0/_static/css/theme.css @@ -0,0 +1,12379 @@ +@charset "UTF-8"; +/*! + * Bootstrap v4.0.0 (https://getbootstrap.com) + * Copyright 2011-2018 The Bootstrap Authors + * Copyright 2011-2018 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */ +:root { + --blue: #007bff; + --indigo: #6610f2; + --purple: #6f42c1; + --pink: #e83e8c; + --red: #dc3545; + --orange: #fd7e14; + --yellow: #ffc107; + --green: #28a745; + --teal: #20c997; + --cyan: #17a2b8; + --white: #fff; + --gray: #6c757d; + --gray-dark: #343a40; + --primary: #007bff; + --secondary: #6c757d; + --success: #28a745; + --info: #17a2b8; + --warning: #ffc107; + --danger: #dc3545; + --light: #f8f9fa; + --dark: #343a40; + --breakpoint-xs: 0; + --breakpoint-sm: 576px; + --breakpoint-md: 768px; + --breakpoint-lg: 992px; + --breakpoint-xl: 1200px; + --font-family-sans-serif: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + --font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; +} + +*, +*::before, +*::after { + -webkit-box-sizing: border-box; + box-sizing: border-box; +} + +html { + font-family: sans-serif; + line-height: 1.15; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; + -ms-overflow-style: scrollbar; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); +} + +@-ms-viewport { + width: device-width; +} +article, aside, dialog, figcaption, figure, footer, header, hgroup, main, nav, section { + display: block; +} + +body { + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #212529; + text-align: left; + background-color: #fff; +} + +[tabindex="-1"]:focus { + outline: 0 !important; +} + +hr { + -webkit-box-sizing: content-box; + box-sizing: content-box; + height: 0; + overflow: visible; +} + +h1, h2, h3, h4, h5, h6 { + margin-top: 0; + margin-bottom: 0.5rem; +} + +p { + margin-top: 0; + margin-bottom: 1rem; +} + +abbr[title], +abbr[data-original-title] { + text-decoration: underline; + -webkit-text-decoration: underline dotted; + text-decoration: underline dotted; + cursor: help; + border-bottom: 0; +} + +address { + margin-bottom: 1rem; + font-style: normal; + line-height: inherit; +} + +ol, +ul, +dl { + margin-top: 0; + margin-bottom: 1rem; +} + +ol ol, +ul ul, +ol ul, +ul ol { + margin-bottom: 0; +} + +dt { + font-weight: 700; +} + +dd { + margin-bottom: .5rem; + margin-left: 0; +} + +blockquote { + margin: 0 0 1rem; +} + +dfn { + font-style: italic; +} + +b, +strong { + font-weight: bolder; +} + +small { + font-size: 80%; +} + +sub, +sup { + position: relative; + font-size: 75%; + line-height: 0; + vertical-align: baseline; +} + +sub { + bottom: -.25em; +} + +sup { + top: -.5em; +} + +a { + color: #007bff; + text-decoration: none; + background-color: transparent; + -webkit-text-decoration-skip: objects; +} +a:hover { + color: #0056b3; + text-decoration: underline; +} + +a:not([href]):not([tabindex]) { + color: inherit; + text-decoration: none; +} +a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus { + color: inherit; + text-decoration: none; +} +a:not([href]):not([tabindex]):focus { + outline: 0; +} + +pre, +code, +kbd, +samp { + font-family: monospace, monospace; + 
font-size: 1em; +} + +pre { + margin-top: 0; + margin-bottom: 1rem; + overflow: auto; + -ms-overflow-style: scrollbar; +} + +figure { + margin: 0 0 1rem; +} + +img { + vertical-align: middle; + border-style: none; +} + +svg:not(:root) { + overflow: hidden; +} + +table { + border-collapse: collapse; +} + +caption { + padding-top: 0.75rem; + padding-bottom: 0.75rem; + color: #6c757d; + text-align: left; + caption-side: bottom; +} + +th { + text-align: inherit; +} + +label { + display: inline-block; + margin-bottom: .5rem; +} + +button { + border-radius: 0; +} + +button:focus { + outline: 1px dotted; + outline: 5px auto -webkit-focus-ring-color; +} + +input, +button, +select, +optgroup, +textarea { + margin: 0; + font-family: inherit; + font-size: inherit; + line-height: inherit; +} + +button, +input { + overflow: visible; +} + +button, +select { + text-transform: none; +} + +button, +html [type="button"], +[type="reset"], +[type="submit"] { + -webkit-appearance: button; +} + +button::-moz-focus-inner, +[type="button"]::-moz-focus-inner, +[type="reset"]::-moz-focus-inner, +[type="submit"]::-moz-focus-inner { + padding: 0; + border-style: none; +} + +input[type="radio"], +input[type="checkbox"] { + -webkit-box-sizing: border-box; + box-sizing: border-box; + padding: 0; +} + +input[type="date"], +input[type="time"], +input[type="datetime-local"], +input[type="month"] { + -webkit-appearance: listbox; +} + +textarea { + overflow: auto; + resize: vertical; +} + +fieldset { + min-width: 0; + padding: 0; + margin: 0; + border: 0; +} + +legend { + display: block; + width: 100%; + max-width: 100%; + padding: 0; + margin-bottom: .5rem; + font-size: 1.5rem; + line-height: inherit; + color: inherit; + white-space: normal; +} + +progress { + vertical-align: baseline; +} + +[type="number"]::-webkit-inner-spin-button, +[type="number"]::-webkit-outer-spin-button { + height: auto; +} + +[type="search"] { + outline-offset: -2px; + -webkit-appearance: none; +} + +[type="search"]::-webkit-search-cancel-button, +[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} + +::-webkit-file-upload-button { + font: inherit; + -webkit-appearance: button; +} + +output { + display: inline-block; +} + +summary { + display: list-item; + cursor: pointer; +} + +template { + display: none; +} + +[hidden] { + display: none !important; +} + +h1, h2, h3, h4, h5, h6, +.h1, .h2, .h3, .h4, .h5, .h6 { + margin-bottom: 0.5rem; + font-family: inherit; + font-weight: 500; + line-height: 1.2; + color: inherit; +} + +h1, .h1 { + font-size: 2.5rem; +} + +h2, .h2 { + font-size: 2rem; +} + +h3, .h3 { + font-size: 1.75rem; +} + +h4, .h4 { + font-size: 1.5rem; +} + +h5, .h5 { + font-size: 1.25rem; +} + +h6, .h6 { + font-size: 1rem; +} + +.lead { + font-size: 1.25rem; + font-weight: 300; +} + +.display-1 { + font-size: 6rem; + font-weight: 300; + line-height: 1.2; +} + +.display-2 { + font-size: 5.5rem; + font-weight: 300; + line-height: 1.2; +} + +.display-3 { + font-size: 4.5rem; + font-weight: 300; + line-height: 1.2; +} + +.display-4 { + font-size: 3.5rem; + font-weight: 300; + line-height: 1.2; +} + +hr { + margin-top: 1rem; + margin-bottom: 1rem; + border: 0; + border-top: 1px solid rgba(0, 0, 0, 0.1); +} + +small, +.small { + font-size: 80%; + font-weight: 400; +} + +mark, +.mark { + padding: 0.2em; + background-color: #fcf8e3; +} + +.list-unstyled { + padding-left: 0; + list-style: none; +} + +.list-inline { + padding-left: 0; + list-style: none; +} + +.list-inline-item { + display: inline-block; +} 
+.list-inline-item:not(:last-child) { + margin-right: 0.5rem; +} + +.initialism { + font-size: 90%; + text-transform: uppercase; +} + +.blockquote { + margin-bottom: 1rem; + font-size: 1.25rem; +} + +.blockquote-footer { + display: block; + font-size: 80%; + color: #6c757d; +} +.blockquote-footer::before { + content: "\2014 \00A0"; +} + +.img-fluid { + max-width: 100%; + height: auto; +} + +.img-thumbnail { + padding: 0.25rem; + background-color: #fff; + border: 1px solid #dee2e6; + border-radius: 0.25rem; + max-width: 100%; + height: auto; +} + +.figure { + display: inline-block; +} + +.figure-img { + margin-bottom: 0.5rem; + line-height: 1; +} + +.figure-caption { + font-size: 90%; + color: #6c757d; +} + +code, +kbd, +pre, +samp { + font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; +} + +code { + font-size: 87.5%; + color: #e83e8c; + word-break: break-word; +} +a > code { + color: inherit; +} + +kbd { + padding: 0.2rem 0.4rem; + font-size: 87.5%; + color: #fff; + background-color: #212529; + border-radius: 0.2rem; +} +kbd kbd { + padding: 0; + font-size: 100%; + font-weight: 700; +} + +pre { + display: block; + font-size: 87.5%; + color: #212529; +} +pre code { + font-size: inherit; + color: inherit; + word-break: normal; +} + +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; +} + +.container { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} +@media (min-width: 576px) { + .container { + max-width: 540px; + } +} +@media (min-width: 768px) { + .container { + max-width: 720px; + } +} +@media (min-width: 992px) { + .container { + max-width: 960px; + } +} +@media (min-width: 1200px) { + .container { + max-width: 1140px; + } +} + +.container-fluid { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} + +.row { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -15px; + margin-left: -15px; +} + +.no-gutters { + margin-right: 0; + margin-left: 0; +} +.no-gutters > .col, +.no-gutters > [class*="col-"] { + padding-right: 0; + padding-left: 0; +} + +.col-1, .col-2, .col-3, .col-4, .col-5, .col-6, .col-7, .col-8, .col-9, .col-10, .col-11, .col-12, .col, +.col-auto, .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12, .col-sm, +.col-sm-auto, .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12, .col-md, +.col-md-auto, .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12, .col-lg, +.col-lg-auto, .col-xl-1, .col-xl-2, .col-xl-3, .col-xl-4, .col-xl-5, .col-xl-6, .col-xl-7, .col-xl-8, .col-xl-9, .col-xl-10, .col-xl-11, .col-xl-12, .col-xl, +.col-xl-auto { + position: relative; + width: 100%; + min-height: 1px; + padding-right: 15px; + padding-left: 15px; +} + +.col { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; +} + +.col-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; +} + +.col-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; +} + +.col-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; 
+ max-width: 16.6666666667%; +} + +.col-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; +} + +.col-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; +} + +.col-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; +} + +.col-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; +} + +.col-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; +} + +.col-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; +} + +.col-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; +} + +.col-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; +} + +.col-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; +} + +.col-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; +} + +.order-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; +} + +.order-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; +} + +.order-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; +} + +.order-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; +} + +.order-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; +} + +.order-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; +} + +.order-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; +} + +.order-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; +} + +.order-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; +} + +.order-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; +} + +.order-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; +} + +.order-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; +} + +.order-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; +} + +.order-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; +} + +.order-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; +} + +.offset-1 { + margin-left: 8.3333333333%; +} + +.offset-2 { + margin-left: 16.6666666667%; +} + +.offset-3 { + margin-left: 25%; +} + +.offset-4 { + margin-left: 33.3333333333%; +} + +.offset-5 { + margin-left: 41.6666666667%; +} + +.offset-6 { + margin-left: 50%; +} + +.offset-7 { + margin-left: 58.3333333333%; +} + +.offset-8 { + margin-left: 66.6666666667%; +} + +.offset-9 { + margin-left: 75%; +} + +.offset-10 { + margin-left: 83.3333333333%; +} + +.offset-11 { + margin-left: 91.6666666667%; +} + +@media (min-width: 576px) { + .col-sm { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-sm-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-sm-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-sm-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-sm-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; 
+ flex: 0 0 25%; + max-width: 25%; + } + + .col-sm-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-sm-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-sm-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-sm-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-sm-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-sm-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-sm-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-sm-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-sm-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-sm-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-sm-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-sm-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-sm-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-sm-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-sm-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-sm-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-sm-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-sm-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-sm-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-sm-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-sm-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-sm-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-sm-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-sm-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-sm-0 { + margin-left: 0; + } + + .offset-sm-1 { + margin-left: 8.3333333333%; + } + + .offset-sm-2 { + margin-left: 16.6666666667%; + } + + .offset-sm-3 { + margin-left: 25%; + } + + .offset-sm-4 { + margin-left: 33.3333333333%; + } + + .offset-sm-5 { + margin-left: 41.6666666667%; + } + + .offset-sm-6 { + margin-left: 50%; + } + + .offset-sm-7 { + margin-left: 58.3333333333%; + } + + .offset-sm-8 { + margin-left: 66.6666666667%; + } + + .offset-sm-9 { + margin-left: 75%; + } + + .offset-sm-10 { + margin-left: 83.3333333333%; + } + + .offset-sm-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 768px) { + .col-md { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-md-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-md-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-md-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 
16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-md-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-md-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-md-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-md-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-md-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-md-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-md-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-md-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-md-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-md-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-md-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-md-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-md-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-md-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-md-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-md-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-md-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-md-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-md-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-md-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-md-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-md-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-md-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-md-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-md-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-md-0 { + margin-left: 0; + } + + .offset-md-1 { + margin-left: 8.3333333333%; + } + + .offset-md-2 { + margin-left: 16.6666666667%; + } + + .offset-md-3 { + margin-left: 25%; + } + + .offset-md-4 { + margin-left: 33.3333333333%; + } + + .offset-md-5 { + margin-left: 41.6666666667%; + } + + .offset-md-6 { + margin-left: 50%; + } + + .offset-md-7 { + margin-left: 58.3333333333%; + } + + .offset-md-8 { + margin-left: 66.6666666667%; + } + + .offset-md-9 { + margin-left: 75%; + } + + .offset-md-10 { + margin-left: 83.3333333333%; + } + + .offset-md-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 992px) { + .col-lg { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-lg-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-lg-1 { + -webkit-box-flex: 0; + -ms-flex: 0 
0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-lg-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-lg-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-lg-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-lg-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-lg-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-lg-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-lg-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-lg-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-lg-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-lg-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-lg-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-lg-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-lg-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-lg-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-lg-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-lg-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-lg-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-lg-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-lg-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-lg-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-lg-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-lg-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-lg-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-lg-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-lg-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-lg-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-lg-0 { + margin-left: 0; + } + + .offset-lg-1 { + margin-left: 8.3333333333%; + } + + .offset-lg-2 { + margin-left: 16.6666666667%; + } + + .offset-lg-3 { + margin-left: 25%; + } + + .offset-lg-4 { + margin-left: 33.3333333333%; + } + + .offset-lg-5 { + margin-left: 41.6666666667%; + } + + .offset-lg-6 { + margin-left: 50%; + } + + .offset-lg-7 { + margin-left: 58.3333333333%; + } + + .offset-lg-8 { + margin-left: 66.6666666667%; + } + + .offset-lg-9 { + margin-left: 75%; + } + + .offset-lg-10 { + margin-left: 83.3333333333%; + } + + .offset-lg-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 1200px) { + .col-xl { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-xl-auto { + -webkit-box-flex: 0; 
+ -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-xl-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-xl-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-xl-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-xl-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-xl-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-xl-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-xl-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-xl-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-xl-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-xl-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-xl-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-xl-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-xl-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-xl-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-xl-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-xl-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-xl-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-xl-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-xl-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-xl-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-xl-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-xl-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-xl-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-xl-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-xl-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-xl-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-xl-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-xl-0 { + margin-left: 0; + } + + .offset-xl-1 { + margin-left: 8.3333333333%; + } + + .offset-xl-2 { + margin-left: 16.6666666667%; + } + + .offset-xl-3 { + margin-left: 25%; + } + + .offset-xl-4 { + margin-left: 33.3333333333%; + } + + .offset-xl-5 { + margin-left: 41.6666666667%; + } + + .offset-xl-6 { + margin-left: 50%; + } + + .offset-xl-7 { + margin-left: 58.3333333333%; + } + + .offset-xl-8 { + margin-left: 66.6666666667%; + } + + .offset-xl-9 { + margin-left: 75%; + } + + .offset-xl-10 { + margin-left: 83.3333333333%; + } + + .offset-xl-11 { + margin-left: 91.6666666667%; + } +} +.table { + width: 100%; + max-width: 100%; + margin-bottom: 1rem; + 
background-color: transparent; +} +.table th, +.table td { + padding: 0.75rem; + vertical-align: top; + border-top: 1px solid #dee2e6; +} +.table thead th { + vertical-align: bottom; + border-bottom: 2px solid #dee2e6; +} +.table tbody + tbody { + border-top: 2px solid #dee2e6; +} +.table .table { + background-color: #fff; +} + +.table-sm th, +.table-sm td { + padding: 0.3rem; +} + +.table-bordered { + border: 1px solid #dee2e6; +} +.table-bordered th, +.table-bordered td { + border: 1px solid #dee2e6; +} +.table-bordered thead th, +.table-bordered thead td { + border-bottom-width: 2px; +} + +.table-striped tbody tr:nth-of-type(odd) { + background-color: rgba(0, 0, 0, 0.05); +} + +.table-hover tbody tr:hover { + background-color: rgba(0, 0, 0, 0.075); +} + +.table-primary, +.table-primary > th, +.table-primary > td { + background-color: #b8daff; +} + +.table-hover .table-primary:hover { + background-color: #9fcdff; +} +.table-hover .table-primary:hover > td, +.table-hover .table-primary:hover > th { + background-color: #9fcdff; +} + +.table-secondary, +.table-secondary > th, +.table-secondary > td { + background-color: #d6d8db; +} + +.table-hover .table-secondary:hover { + background-color: #c8cbcf; +} +.table-hover .table-secondary:hover > td, +.table-hover .table-secondary:hover > th { + background-color: #c8cbcf; +} + +.table-success, +.table-success > th, +.table-success > td { + background-color: #c3e6cb; +} + +.table-hover .table-success:hover { + background-color: #b1dfbb; +} +.table-hover .table-success:hover > td, +.table-hover .table-success:hover > th { + background-color: #b1dfbb; +} + +.table-info, +.table-info > th, +.table-info > td { + background-color: #bee5eb; +} + +.table-hover .table-info:hover { + background-color: #abdde5; +} +.table-hover .table-info:hover > td, +.table-hover .table-info:hover > th { + background-color: #abdde5; +} + +.table-warning, +.table-warning > th, +.table-warning > td { + background-color: #ffeeba; +} + +.table-hover .table-warning:hover { + background-color: #ffe8a1; +} +.table-hover .table-warning:hover > td, +.table-hover .table-warning:hover > th { + background-color: #ffe8a1; +} + +.table-danger, +.table-danger > th, +.table-danger > td { + background-color: #f5c6cb; +} + +.table-hover .table-danger:hover { + background-color: #f1b0b7; +} +.table-hover .table-danger:hover > td, +.table-hover .table-danger:hover > th { + background-color: #f1b0b7; +} + +.table-light, +.table-light > th, +.table-light > td { + background-color: #fdfdfe; +} + +.table-hover .table-light:hover { + background-color: #ececf6; +} +.table-hover .table-light:hover > td, +.table-hover .table-light:hover > th { + background-color: #ececf6; +} + +.table-dark, +.table-dark > th, +.table-dark > td { + background-color: #c6c8ca; +} + +.table-hover .table-dark:hover { + background-color: #b9bbbe; +} +.table-hover .table-dark:hover > td, +.table-hover .table-dark:hover > th { + background-color: #b9bbbe; +} + +.table-active, +.table-active > th, +.table-active > td { + background-color: rgba(0, 0, 0, 0.075); +} + +.table-hover .table-active:hover { + background-color: rgba(0, 0, 0, 0.075); +} +.table-hover .table-active:hover > td, +.table-hover .table-active:hover > th { + background-color: rgba(0, 0, 0, 0.075); +} + +.table .thead-dark th { + color: #fff; + background-color: #212529; + border-color: #32383e; +} +.table .thead-light th { + color: #495057; + background-color: #e9ecef; + border-color: #dee2e6; +} + +.table-dark { + color: #fff; + background-color: #212529; 
+} +.table-dark th, +.table-dark td, +.table-dark thead th { + border-color: #32383e; +} +.table-dark.table-bordered { + border: 0; +} +.table-dark.table-striped tbody tr:nth-of-type(odd) { + background-color: rgba(255, 255, 255, 0.05); +} +.table-dark.table-hover tbody tr:hover { + background-color: rgba(255, 255, 255, 0.075); +} + +@media (max-width: 575.98px) { + .table-responsive-sm { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-sm > .table-bordered { + border: 0; + } +} +@media (max-width: 767.98px) { + .table-responsive-md { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-md > .table-bordered { + border: 0; + } +} +@media (max-width: 991.98px) { + .table-responsive-lg { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-lg > .table-bordered { + border: 0; + } +} +@media (max-width: 1199.98px) { + .table-responsive-xl { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-xl > .table-bordered { + border: 0; + } +} +.table-responsive { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; +} +.table-responsive > .table-bordered { + border: 0; +} + +.form-control { + display: block; + width: 100%; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + color: #495057; + background-color: #fff; + background-clip: padding-box; + border: 1px solid #ced4da; + border-radius: 0.25rem; + -webkit-transition: border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; +} +.form-control::-ms-expand { + background-color: transparent; + border: 0; +} +.form-control:focus { + color: #495057; + background-color: #fff; + border-color: #80bdff; + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.form-control::-webkit-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::-moz-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control:-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::placeholder { + color: #6c757d; + opacity: 1; +} +.form-control:disabled, .form-control[readonly] { + background-color: #e9ecef; + opacity: 1; +} + +select.form-control:not([size]):not([multiple]) { + height: calc(2.25rem + 2px); +} +select.form-control:focus::-ms-value { + color: #495057; + background-color: #fff; +} + +.form-control-file, +.form-control-range { + display: block; + width: 100%; +} + +.col-form-label { + padding-top: calc(0.375rem + 1px); + padding-bottom: calc(0.375rem + 1px); + margin-bottom: 0; + font-size: inherit; + line-height: 1.5; +} + +.col-form-label-lg { + padding-top: calc(0.5rem + 1px); + padding-bottom: calc(0.5rem + 1px); + font-size: 1.25rem; + line-height: 1.5; +} + 
+.col-form-label-sm { + padding-top: calc(0.25rem + 1px); + padding-bottom: calc(0.25rem + 1px); + font-size: 0.875rem; + line-height: 1.5; +} + +.form-control-plaintext { + display: block; + width: 100%; + padding-top: 0.375rem; + padding-bottom: 0.375rem; + margin-bottom: 0; + line-height: 1.5; + background-color: transparent; + border: solid transparent; + border-width: 1px 0; +} +.form-control-plaintext.form-control-sm, .input-group-sm > .form-control-plaintext.form-control, +.input-group-sm > .input-group-prepend > .form-control-plaintext.input-group-text, +.input-group-sm > .input-group-append > .form-control-plaintext.input-group-text, +.input-group-sm > .input-group-prepend > .form-control-plaintext.btn, +.input-group-sm > .input-group-append > .form-control-plaintext.btn, .form-control-plaintext.form-control-lg, .input-group-lg > .form-control-plaintext.form-control, +.input-group-lg > .input-group-prepend > .form-control-plaintext.input-group-text, +.input-group-lg > .input-group-append > .form-control-plaintext.input-group-text, +.input-group-lg > .input-group-prepend > .form-control-plaintext.btn, +.input-group-lg > .input-group-append > .form-control-plaintext.btn { + padding-right: 0; + padding-left: 0; +} + +.form-control-sm, .input-group-sm > .form-control, +.input-group-sm > .input-group-prepend > .input-group-text, +.input-group-sm > .input-group-append > .input-group-text, +.input-group-sm > .input-group-prepend > .btn, +.input-group-sm > .input-group-append > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +select.form-control-sm:not([size]):not([multiple]), .input-group-sm > select.form-control:not([size]):not([multiple]), +.input-group-sm > .input-group-prepend > select.input-group-text:not([size]):not([multiple]), +.input-group-sm > .input-group-append > select.input-group-text:not([size]):not([multiple]), +.input-group-sm > .input-group-prepend > select.btn:not([size]):not([multiple]), +.input-group-sm > .input-group-append > select.btn:not([size]):not([multiple]) { + height: calc(1.8125rem + 2px); +} + +.form-control-lg, .input-group-lg > .form-control, +.input-group-lg > .input-group-prepend > .input-group-text, +.input-group-lg > .input-group-append > .input-group-text, +.input-group-lg > .input-group-prepend > .btn, +.input-group-lg > .input-group-append > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +select.form-control-lg:not([size]):not([multiple]), .input-group-lg > select.form-control:not([size]):not([multiple]), +.input-group-lg > .input-group-prepend > select.input-group-text:not([size]):not([multiple]), +.input-group-lg > .input-group-append > select.input-group-text:not([size]):not([multiple]), +.input-group-lg > .input-group-prepend > select.btn:not([size]):not([multiple]), +.input-group-lg > .input-group-append > select.btn:not([size]):not([multiple]) { + height: calc(2.875rem + 2px); +} + +.form-group { + margin-bottom: 1rem; +} + +.form-text { + display: block; + margin-top: 0.25rem; +} + +.form-row { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -5px; + margin-left: -5px; +} +.form-row > .col, +.form-row > [class*="col-"] { + padding-right: 5px; + padding-left: 5px; +} + +.form-check { + position: relative; + display: block; + padding-left: 1.25rem; +} + +.form-check-input { + position: absolute; + margin-top: 0.3rem; + margin-left: -1.25rem; +} 
+.form-check-input:disabled ~ .form-check-label { + color: #6c757d; +} + +.form-check-label { + margin-bottom: 0; +} + +.form-check-inline { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding-left: 0; + margin-right: 0.75rem; +} +.form-check-inline .form-check-input { + position: static; + margin-top: 0; + margin-right: 0.3125rem; + margin-left: 0; +} + +.valid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #28a745; +} + +.valid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: .5rem; + margin-top: .1rem; + font-size: .875rem; + line-height: 1; + color: #fff; + background-color: rgba(40, 167, 69, 0.8); + border-radius: .2rem; +} + +.was-validated .form-control:valid, .form-control.is-valid, +.was-validated .custom-select:valid, +.custom-select.is-valid { + border-color: #28a745; +} +.was-validated .form-control:valid:focus, .form-control.is-valid:focus, +.was-validated .custom-select:valid:focus, +.custom-select.is-valid:focus { + border-color: #28a745; + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} +.was-validated .form-control:valid ~ .valid-feedback, +.was-validated .form-control:valid ~ .valid-tooltip, .form-control.is-valid ~ .valid-feedback, +.form-control.is-valid ~ .valid-tooltip, +.was-validated .custom-select:valid ~ .valid-feedback, +.was-validated .custom-select:valid ~ .valid-tooltip, +.custom-select.is-valid ~ .valid-feedback, +.custom-select.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .form-check-input:valid ~ .form-check-label, .form-check-input.is-valid ~ .form-check-label { + color: #28a745; +} +.was-validated .form-check-input:valid ~ .valid-feedback, +.was-validated .form-check-input:valid ~ .valid-tooltip, .form-check-input.is-valid ~ .valid-feedback, +.form-check-input.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .custom-control-input:valid ~ .custom-control-label, .custom-control-input.is-valid ~ .custom-control-label { + color: #28a745; +} +.was-validated .custom-control-input:valid ~ .custom-control-label::before, .custom-control-input.is-valid ~ .custom-control-label::before { + background-color: #71dd8a; +} +.was-validated .custom-control-input:valid ~ .valid-feedback, +.was-validated .custom-control-input:valid ~ .valid-tooltip, .custom-control-input.is-valid ~ .valid-feedback, +.custom-control-input.is-valid ~ .valid-tooltip { + display: block; +} +.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before, .custom-control-input.is-valid:checked ~ .custom-control-label::before { + background-color: #34ce57; +} +.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before, .custom-control-input.is-valid:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.was-validated .custom-file-input:valid ~ .custom-file-label, .custom-file-input.is-valid ~ .custom-file-label { + border-color: #28a745; +} +.was-validated .custom-file-input:valid ~ .custom-file-label::before, .custom-file-input.is-valid ~ .custom-file-label::before { + border-color: inherit; +} +.was-validated .custom-file-input:valid ~ .valid-feedback, +.was-validated .custom-file-input:valid ~ 
.valid-tooltip, .custom-file-input.is-valid ~ .valid-feedback, +.custom-file-input.is-valid ~ .valid-tooltip { + display: block; +} +.was-validated .custom-file-input:valid:focus ~ .custom-file-label, .custom-file-input.is-valid:focus ~ .custom-file-label { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.invalid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #dc3545; +} + +.invalid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: .5rem; + margin-top: .1rem; + font-size: .875rem; + line-height: 1; + color: #fff; + background-color: rgba(220, 53, 69, 0.8); + border-radius: .2rem; +} + +.was-validated .form-control:invalid, .form-control.is-invalid, +.was-validated .custom-select:invalid, +.custom-select.is-invalid { + border-color: #dc3545; +} +.was-validated .form-control:invalid:focus, .form-control.is-invalid:focus, +.was-validated .custom-select:invalid:focus, +.custom-select.is-invalid:focus { + border-color: #dc3545; + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} +.was-validated .form-control:invalid ~ .invalid-feedback, +.was-validated .form-control:invalid ~ .invalid-tooltip, .form-control.is-invalid ~ .invalid-feedback, +.form-control.is-invalid ~ .invalid-tooltip, +.was-validated .custom-select:invalid ~ .invalid-feedback, +.was-validated .custom-select:invalid ~ .invalid-tooltip, +.custom-select.is-invalid ~ .invalid-feedback, +.custom-select.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .form-check-input:invalid ~ .form-check-label, .form-check-input.is-invalid ~ .form-check-label { + color: #dc3545; +} +.was-validated .form-check-input:invalid ~ .invalid-feedback, +.was-validated .form-check-input:invalid ~ .invalid-tooltip, .form-check-input.is-invalid ~ .invalid-feedback, +.form-check-input.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .custom-control-input:invalid ~ .custom-control-label, .custom-control-input.is-invalid ~ .custom-control-label { + color: #dc3545; +} +.was-validated .custom-control-input:invalid ~ .custom-control-label::before, .custom-control-input.is-invalid ~ .custom-control-label::before { + background-color: #efa2a9; +} +.was-validated .custom-control-input:invalid ~ .invalid-feedback, +.was-validated .custom-control-input:invalid ~ .invalid-tooltip, .custom-control-input.is-invalid ~ .invalid-feedback, +.custom-control-input.is-invalid ~ .invalid-tooltip { + display: block; +} +.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before, .custom-control-input.is-invalid:checked ~ .custom-control-label::before { + background-color: #e4606d; +} +.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before, .custom-control-input.is-invalid:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.was-validated .custom-file-input:invalid ~ .custom-file-label, .custom-file-input.is-invalid ~ .custom-file-label { + border-color: #dc3545; +} +.was-validated .custom-file-input:invalid ~ .custom-file-label::before, .custom-file-input.is-invalid ~ .custom-file-label::before { + border-color: inherit; +} +.was-validated .custom-file-input:invalid ~ .invalid-feedback, +.was-validated 
.custom-file-input:invalid ~ .invalid-tooltip, .custom-file-input.is-invalid ~ .invalid-feedback, +.custom-file-input.is-invalid ~ .invalid-tooltip { + display: block; +} +.was-validated .custom-file-input:invalid:focus ~ .custom-file-label, .custom-file-input.is-invalid:focus ~ .custom-file-label { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.form-inline { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.form-inline .form-check { + width: 100%; +} +@media (min-width: 576px) { + .form-inline label { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + margin-bottom: 0; + } + .form-inline .form-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + margin-bottom: 0; + } + .form-inline .form-control { + display: inline-block; + width: auto; + vertical-align: middle; + } + .form-inline .form-control-plaintext { + display: inline-block; + } + .form-inline .input-group { + width: auto; + } + .form-inline .form-check { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + width: auto; + padding-left: 0; + } + .form-inline .form-check-input { + position: relative; + margin-top: 0; + margin-right: 0.25rem; + margin-left: 0; + } + .form-inline .custom-control { + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + } + .form-inline .custom-control-label { + margin-bottom: 0; + } +} + +.btn { + display: inline-block; + font-weight: 400; + text-align: center; + white-space: nowrap; + vertical-align: middle; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + border: 1px solid transparent; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + border-radius: 0.25rem; + -webkit-transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; +} +.btn:hover, .btn:focus { + text-decoration: none; +} +.btn:focus, .btn.focus { + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.btn.disabled, .btn:disabled { + opacity: 0.65; +} 
+.btn:not(:disabled):not(.disabled) { + cursor: pointer; +} +.btn:not(:disabled):not(.disabled):active, .btn:not(:disabled):not(.disabled).active { + background-image: none; +} + +a.btn.disabled, +fieldset:disabled a.btn { + pointer-events: none; +} + +.btn-primary { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-primary:hover { + color: #fff; + background-color: #0069d9; + border-color: #0062cc; +} +.btn-primary:focus, .btn-primary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} +.btn-primary.disabled, .btn-primary:disabled { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-primary:not(:disabled):not(.disabled):active, .btn-primary:not(:disabled):not(.disabled).active, .show > .btn-primary.dropdown-toggle { + color: #fff; + background-color: #0062cc; + border-color: #005cbf; +} +.btn-primary:not(:disabled):not(.disabled):active:focus, .btn-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-primary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-secondary { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-secondary:hover { + color: #fff; + background-color: #5a6268; + border-color: #545b62; +} +.btn-secondary:focus, .btn-secondary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} +.btn-secondary.disabled, .btn-secondary:disabled { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-secondary:not(:disabled):not(.disabled):active, .btn-secondary:not(:disabled):not(.disabled).active, .show > .btn-secondary.dropdown-toggle { + color: #fff; + background-color: #545b62; + border-color: #4e555b; +} +.btn-secondary:not(:disabled):not(.disabled):active:focus, .btn-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-secondary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.btn-success { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-success:hover { + color: #fff; + background-color: #218838; + border-color: #1e7e34; +} +.btn-success:focus, .btn-success.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} +.btn-success.disabled, .btn-success:disabled { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-success:not(:disabled):not(.disabled):active, .btn-success:not(:disabled):not(.disabled).active, .show > .btn-success.dropdown-toggle { + color: #fff; + background-color: #1e7e34; + border-color: #1c7430; +} +.btn-success:not(:disabled):not(.disabled):active:focus, .btn-success:not(:disabled):not(.disabled).active:focus, .show > .btn-success.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-info { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-info:hover { + color: #fff; + background-color: #138496; + border-color: #117a8b; +} +.btn-info:focus, .btn-info.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} +.btn-info.disabled, .btn-info:disabled { + color: #fff; + background-color: #17a2b8; + border-color: 
#17a2b8; +} +.btn-info:not(:disabled):not(.disabled):active, .btn-info:not(:disabled):not(.disabled).active, .show > .btn-info.dropdown-toggle { + color: #fff; + background-color: #117a8b; + border-color: #10707f; +} +.btn-info:not(:disabled):not(.disabled):active:focus, .btn-info:not(:disabled):not(.disabled).active:focus, .show > .btn-info.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-warning { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-warning:hover { + color: #212529; + background-color: #e0a800; + border-color: #d39e00; +} +.btn-warning:focus, .btn-warning.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} +.btn-warning.disabled, .btn-warning:disabled { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-warning:not(:disabled):not(.disabled):active, .btn-warning:not(:disabled):not(.disabled).active, .show > .btn-warning.dropdown-toggle { + color: #212529; + background-color: #d39e00; + border-color: #c69500; +} +.btn-warning:not(:disabled):not(.disabled):active:focus, .btn-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-warning.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-danger { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-danger:hover { + color: #fff; + background-color: #c82333; + border-color: #bd2130; +} +.btn-danger:focus, .btn-danger.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} +.btn-danger.disabled, .btn-danger:disabled { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-danger:not(:disabled):not(.disabled):active, .btn-danger:not(:disabled):not(.disabled).active, .show > .btn-danger.dropdown-toggle { + color: #fff; + background-color: #bd2130; + border-color: #b21f2d; +} +.btn-danger:not(:disabled):not(.disabled):active:focus, .btn-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-danger.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-light { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-light:hover { + color: #212529; + background-color: #e2e6ea; + border-color: #dae0e5; +} +.btn-light:focus, .btn-light.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} +.btn-light.disabled, .btn-light:disabled { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-light:not(:disabled):not(.disabled):active, .btn-light:not(:disabled):not(.disabled).active, .show > .btn-light.dropdown-toggle { + color: #212529; + background-color: #dae0e5; + border-color: #d3d9df; +} +.btn-light:not(:disabled):not(.disabled):active:focus, .btn-light:not(:disabled):not(.disabled).active:focus, .show > .btn-light.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-dark { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-dark:hover { + color: #fff; + background-color: #23272b; + border-color: #1d2124; +} +.btn-dark:focus, .btn-dark.focus { + -webkit-box-shadow: 0 
0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} +.btn-dark.disabled, .btn-dark:disabled { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-dark:not(:disabled):not(.disabled):active, .btn-dark:not(:disabled):not(.disabled).active, .show > .btn-dark.dropdown-toggle { + color: #fff; + background-color: #1d2124; + border-color: #171a1d; +} +.btn-dark:not(:disabled):not(.disabled):active:focus, .btn-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-dark.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-outline-primary { + color: #007bff; + background-color: transparent; + background-image: none; + border-color: #007bff; +} +.btn-outline-primary:hover { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-outline-primary:focus, .btn-outline-primary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} +.btn-outline-primary.disabled, .btn-outline-primary:disabled { + color: #007bff; + background-color: transparent; +} +.btn-outline-primary:not(:disabled):not(.disabled):active, .btn-outline-primary:not(:disabled):not(.disabled).active, .show > .btn-outline-primary.dropdown-toggle { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-outline-primary:not(:disabled):not(.disabled):active:focus, .btn-outline-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-primary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-outline-secondary { + color: #6c757d; + background-color: transparent; + background-image: none; + border-color: #6c757d; +} +.btn-outline-secondary:hover { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-outline-secondary:focus, .btn-outline-secondary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} +.btn-outline-secondary.disabled, .btn-outline-secondary:disabled { + color: #6c757d; + background-color: transparent; +} +.btn-outline-secondary:not(:disabled):not(.disabled):active, .btn-outline-secondary:not(:disabled):not(.disabled).active, .show > .btn-outline-secondary.dropdown-toggle { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-outline-secondary:not(:disabled):not(.disabled):active:focus, .btn-outline-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-secondary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.btn-outline-success { + color: #28a745; + background-color: transparent; + background-image: none; + border-color: #28a745; +} +.btn-outline-success:hover { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-outline-success:focus, .btn-outline-success.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} +.btn-outline-success.disabled, .btn-outline-success:disabled { + color: #28a745; + background-color: transparent; +} +.btn-outline-success:not(:disabled):not(.disabled):active, .btn-outline-success:not(:disabled):not(.disabled).active, .show > .btn-outline-success.dropdown-toggle { + color: #fff; + background-color: #28a745; + 
border-color: #28a745; +} +.btn-outline-success:not(:disabled):not(.disabled):active:focus, .btn-outline-success:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-success.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-outline-info { + color: #17a2b8; + background-color: transparent; + background-image: none; + border-color: #17a2b8; +} +.btn-outline-info:hover { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-outline-info:focus, .btn-outline-info.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} +.btn-outline-info.disabled, .btn-outline-info:disabled { + color: #17a2b8; + background-color: transparent; +} +.btn-outline-info:not(:disabled):not(.disabled):active, .btn-outline-info:not(:disabled):not(.disabled).active, .show > .btn-outline-info.dropdown-toggle { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-outline-info:not(:disabled):not(.disabled):active:focus, .btn-outline-info:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-info.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-outline-warning { + color: #ffc107; + background-color: transparent; + background-image: none; + border-color: #ffc107; +} +.btn-outline-warning:hover { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-outline-warning:focus, .btn-outline-warning.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} +.btn-outline-warning.disabled, .btn-outline-warning:disabled { + color: #ffc107; + background-color: transparent; +} +.btn-outline-warning:not(:disabled):not(.disabled):active, .btn-outline-warning:not(:disabled):not(.disabled).active, .show > .btn-outline-warning.dropdown-toggle { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-outline-warning:not(:disabled):not(.disabled):active:focus, .btn-outline-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-warning.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-outline-danger { + color: #dc3545; + background-color: transparent; + background-image: none; + border-color: #dc3545; +} +.btn-outline-danger:hover { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-outline-danger:focus, .btn-outline-danger.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} +.btn-outline-danger.disabled, .btn-outline-danger:disabled { + color: #dc3545; + background-color: transparent; +} +.btn-outline-danger:not(:disabled):not(.disabled):active, .btn-outline-danger:not(:disabled):not(.disabled).active, .show > .btn-outline-danger.dropdown-toggle { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-outline-danger:not(:disabled):not(.disabled):active:focus, .btn-outline-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-danger.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-outline-light { + color: #f8f9fa; + background-color: transparent; + background-image: 
none; + border-color: #f8f9fa; +} +.btn-outline-light:hover { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-outline-light:focus, .btn-outline-light.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} +.btn-outline-light.disabled, .btn-outline-light:disabled { + color: #f8f9fa; + background-color: transparent; +} +.btn-outline-light:not(:disabled):not(.disabled):active, .btn-outline-light:not(:disabled):not(.disabled).active, .show > .btn-outline-light.dropdown-toggle { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-outline-light:not(:disabled):not(.disabled):active:focus, .btn-outline-light:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-light.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-outline-dark { + color: #343a40; + background-color: transparent; + background-image: none; + border-color: #343a40; +} +.btn-outline-dark:hover { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-outline-dark:focus, .btn-outline-dark.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} +.btn-outline-dark.disabled, .btn-outline-dark:disabled { + color: #343a40; + background-color: transparent; +} +.btn-outline-dark:not(:disabled):not(.disabled):active, .btn-outline-dark:not(:disabled):not(.disabled).active, .show > .btn-outline-dark.dropdown-toggle { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-outline-dark:not(:disabled):not(.disabled):active:focus, .btn-outline-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-dark.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-link { + font-weight: 400; + color: #007bff; + background-color: transparent; +} +.btn-link:hover { + color: #0056b3; + text-decoration: underline; + background-color: transparent; + border-color: transparent; +} +.btn-link:focus, .btn-link.focus { + text-decoration: underline; + border-color: transparent; + -webkit-box-shadow: none; + box-shadow: none; +} +.btn-link:disabled, .btn-link.disabled { + color: #6c757d; +} + +.btn-lg, .btn-group-lg > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +.btn-sm, .btn-group-sm > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +.btn-block { + display: block; + width: 100%; +} +.btn-block + .btn-block { + margin-top: 0.5rem; +} + +input[type="submit"].btn-block, +input[type="reset"].btn-block, +input[type="button"].btn-block { + width: 100%; +} + +.fade { + opacity: 0; + -webkit-transition: opacity 0.15s linear; + transition: opacity 0.15s linear; +} +.fade.show { + opacity: 1; +} + +.collapse { + display: none; +} +.collapse.show { + display: block; +} + +tr.collapse.show { + display: table-row; +} + +tbody.collapse.show { + display: table-row-group; +} + +.collapsing { + position: relative; + height: 0; + overflow: hidden; + -webkit-transition: height 0.35s ease; + transition: height 0.35s ease; +} + +.dropup, +.dropdown { + position: relative; +} + +.dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + 
border-top: 0.3em solid; + border-right: 0.3em solid transparent; + border-bottom: 0; + border-left: 0.3em solid transparent; +} +.dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropdown-menu { + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 10rem; + padding: 0.5rem 0; + margin: 0.125rem 0 0; + font-size: 1rem; + color: #212529; + text-align: left; + list-style: none; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 0.25rem; +} + +.dropup .dropdown-menu { + margin-top: 0; + margin-bottom: 0.125rem; +} +.dropup .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0; + border-right: 0.3em solid transparent; + border-bottom: 0.3em solid; + border-left: 0.3em solid transparent; +} +.dropup .dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropright .dropdown-menu { + margin-top: 0; + margin-left: 0.125rem; +} +.dropright .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-bottom: 0.3em solid transparent; + border-left: 0.3em solid; +} +.dropright .dropdown-toggle:empty::after { + margin-left: 0; +} +.dropright .dropdown-toggle::after { + vertical-align: 0; +} + +.dropleft .dropdown-menu { + margin-top: 0; + margin-right: 0.125rem; +} +.dropleft .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; +} +.dropleft .dropdown-toggle::after { + display: none; +} +.dropleft .dropdown-toggle::before { + display: inline-block; + width: 0; + height: 0; + margin-right: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-right: 0.3em solid; + border-bottom: 0.3em solid transparent; +} +.dropleft .dropdown-toggle:empty::after { + margin-left: 0; +} +.dropleft .dropdown-toggle::before { + vertical-align: 0; +} + +.dropdown-divider { + height: 0; + margin: 0.5rem 0; + overflow: hidden; + border-top: 1px solid #e9ecef; +} + +.dropdown-item { + display: block; + width: 100%; + padding: 0.25rem 1.5rem; + clear: both; + font-weight: 400; + color: #212529; + text-align: inherit; + white-space: nowrap; + background-color: transparent; + border: 0; +} +.dropdown-item:hover, .dropdown-item:focus { + color: #16181b; + text-decoration: none; + background-color: #f8f9fa; +} +.dropdown-item.active, .dropdown-item:active { + color: #fff; + text-decoration: none; + background-color: #007bff; +} +.dropdown-item.disabled, .dropdown-item:disabled { + color: #6c757d; + background-color: transparent; +} + +.dropdown-menu.show { + display: block; +} + +.dropdown-header { + display: block; + padding: 0.5rem 1.5rem; + margin-bottom: 0; + font-size: 0.875rem; + color: #6c757d; + white-space: nowrap; +} + +.btn-group, +.btn-group-vertical { + position: relative; + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + vertical-align: middle; +} +.btn-group > .btn, +.btn-group-vertical > .btn { + position: relative; + -webkit-box-flex: 0; + -ms-flex: 0 1 auto; + flex: 0 1 auto; +} +.btn-group > .btn:hover, +.btn-group-vertical > .btn:hover { + z-index: 1; +} +.btn-group > .btn:focus, .btn-group > .btn:active, .btn-group > .btn.active, +.btn-group-vertical > .btn:focus, +.btn-group-vertical > 
.btn:active, +.btn-group-vertical > .btn.active { + z-index: 1; +} +.btn-group .btn + .btn, +.btn-group .btn + .btn-group, +.btn-group .btn-group + .btn, +.btn-group .btn-group + .btn-group, +.btn-group-vertical .btn + .btn, +.btn-group-vertical .btn + .btn-group, +.btn-group-vertical .btn-group + .btn, +.btn-group-vertical .btn-group + .btn-group { + margin-left: -1px; +} + +.btn-toolbar { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; +} +.btn-toolbar .input-group { + width: auto; +} + +.btn-group > .btn:first-child { + margin-left: 0; +} +.btn-group > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group > .btn-group:not(:last-child) > .btn { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.btn-group > .btn:not(:first-child), +.btn-group > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.dropdown-toggle-split { + padding-right: 0.5625rem; + padding-left: 0.5625rem; +} +.dropdown-toggle-split::after { + margin-left: 0; +} + +.btn-sm + .dropdown-toggle-split, .btn-group-sm > .btn + .dropdown-toggle-split { + padding-right: 0.375rem; + padding-left: 0.375rem; +} + +.btn-lg + .dropdown-toggle-split, .btn-group-lg > .btn + .dropdown-toggle-split { + padding-right: 0.75rem; + padding-left: 0.75rem; +} + +.btn-group-vertical { + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; +} +.btn-group-vertical .btn, +.btn-group-vertical .btn-group { + width: 100%; +} +.btn-group-vertical > .btn + .btn, +.btn-group-vertical > .btn + .btn-group, +.btn-group-vertical > .btn-group + .btn, +.btn-group-vertical > .btn-group + .btn-group { + margin-top: -1px; + margin-left: 0; +} +.btn-group-vertical > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group-vertical > .btn-group:not(:last-child) > .btn { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group-vertical > .btn:not(:first-child), +.btn-group-vertical > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.btn-group-toggle > .btn, +.btn-group-toggle > .btn-group > .btn { + margin-bottom: 0; +} +.btn-group-toggle > .btn input[type="radio"], +.btn-group-toggle > .btn input[type="checkbox"], +.btn-group-toggle > .btn-group > .btn input[type="radio"], +.btn-group-toggle > .btn-group > .btn input[type="checkbox"] { + position: absolute; + clip: rect(0, 0, 0, 0); + pointer-events: none; +} + +.input-group { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: stretch; + -ms-flex-align: stretch; + align-items: stretch; + width: 100%; +} +.input-group > .form-control, +.input-group > .custom-select, +.input-group > .custom-file { + position: relative; + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + width: 1%; + margin-bottom: 0; +} +.input-group > .form-control:focus, +.input-group > .custom-select:focus, +.input-group > .custom-file:focus { + z-index: 3; +} +.input-group > .form-control + .form-control, +.input-group > .form-control + .custom-select, +.input-group > .form-control + .custom-file, +.input-group > .custom-select 
+ .form-control, +.input-group > .custom-select + .custom-select, +.input-group > .custom-select + .custom-file, +.input-group > .custom-file + .form-control, +.input-group > .custom-file + .custom-select, +.input-group > .custom-file + .custom-file { + margin-left: -1px; +} +.input-group > .form-control:not(:last-child), +.input-group > .custom-select:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.input-group > .form-control:not(:first-child), +.input-group > .custom-select:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} +.input-group > .custom-file { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.input-group > .custom-file:not(:last-child) .custom-file-label, .input-group > .custom-file:not(:last-child) .custom-file-label::before { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.input-group > .custom-file:not(:first-child) .custom-file-label, .input-group > .custom-file:not(:first-child) .custom-file-label::before { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.input-group-prepend, +.input-group-append { + display: -webkit-box; + display: -ms-flexbox; + display: flex; +} +.input-group-prepend .btn, +.input-group-append .btn { + position: relative; + z-index: 2; +} +.input-group-prepend .btn + .btn, +.input-group-prepend .btn + .input-group-text, +.input-group-prepend .input-group-text + .input-group-text, +.input-group-prepend .input-group-text + .btn, +.input-group-append .btn + .btn, +.input-group-append .btn + .input-group-text, +.input-group-append .input-group-text + .input-group-text, +.input-group-append .input-group-text + .btn { + margin-left: -1px; +} + +.input-group-prepend { + margin-right: -1px; +} + +.input-group-append { + margin-left: -1px; +} + +.input-group-text { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding: 0.375rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #495057; + text-align: center; + white-space: nowrap; + background-color: #e9ecef; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} +.input-group-text input[type="radio"], +.input-group-text input[type="checkbox"] { + margin-top: 0; +} + +.input-group > .input-group-prepend > .btn, +.input-group > .input-group-prepend > .input-group-text, +.input-group > .input-group-append:not(:last-child) > .btn, +.input-group > .input-group-append:not(:last-child) > .input-group-text, +.input-group > .input-group-append:last-child > .btn:not(:last-child):not(.dropdown-toggle), +.input-group > .input-group-append:last-child > .input-group-text:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} + +.input-group > .input-group-append > .btn, +.input-group > .input-group-append > .input-group-text, +.input-group > .input-group-prepend:not(:first-child) > .btn, +.input-group > .input-group-prepend:not(:first-child) > .input-group-text, +.input-group > .input-group-prepend:first-child > .btn:not(:first-child), +.input-group > .input-group-prepend:first-child > .input-group-text:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.custom-control { + position: relative; + display: block; + min-height: 1.5rem; + padding-left: 1.5rem; +} + +.custom-control-inline { + display: 
-webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + margin-right: 1rem; +} + +.custom-control-input { + position: absolute; + z-index: -1; + opacity: 0; +} +.custom-control-input:checked ~ .custom-control-label::before { + color: #fff; + background-color: #007bff; +} +.custom-control-input:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.custom-control-input:active ~ .custom-control-label::before { + color: #fff; + background-color: #b3d7ff; +} +.custom-control-input:disabled ~ .custom-control-label { + color: #6c757d; +} +.custom-control-input:disabled ~ .custom-control-label::before { + background-color: #e9ecef; +} + +.custom-control-label { + margin-bottom: 0; +} +.custom-control-label::before { + position: absolute; + top: 0.25rem; + left: 0; + display: block; + width: 1rem; + height: 1rem; + pointer-events: none; + content: ""; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + background-color: #dee2e6; +} +.custom-control-label::after { + position: absolute; + top: 0.25rem; + left: 0; + display: block; + width: 1rem; + height: 1rem; + content: ""; + background-repeat: no-repeat; + background-position: center center; + background-size: 50% 50%; +} + +.custom-checkbox .custom-control-label::before { + border-radius: 0.25rem; +} +.custom-checkbox .custom-control-input:checked ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E"); +} +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E"); +} +.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} +.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-radio .custom-control-label::before { + border-radius: 50%; +} +.custom-radio .custom-control-input:checked ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-radio .custom-control-input:checked ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E"); +} +.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-select { + display: inline-block; + width: 100%; + height: calc(2.25rem + 2px); + padding: 0.375rem 1.75rem 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + vertical-align: middle; + background: #fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right 0.75rem center; + 
background-size: 8px 10px; + border: 1px solid #ced4da; + border-radius: 0.25rem; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; +} +.custom-select:focus { + border-color: #80bdff; + outline: 0; + -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075), 0 0 5px rgba(128, 189, 255, 0.5); + box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075), 0 0 5px rgba(128, 189, 255, 0.5); +} +.custom-select:focus::-ms-value { + color: #495057; + background-color: #fff; +} +.custom-select[multiple], .custom-select[size]:not([size="1"]) { + height: auto; + padding-right: 0.75rem; + background-image: none; +} +.custom-select:disabled { + color: #6c757d; + background-color: #e9ecef; +} +.custom-select::-ms-expand { + opacity: 0; +} + +.custom-select-sm { + height: calc(1.8125rem + 2px); + padding-top: 0.375rem; + padding-bottom: 0.375rem; + font-size: 75%; +} + +.custom-select-lg { + height: calc(2.875rem + 2px); + padding-top: 0.375rem; + padding-bottom: 0.375rem; + font-size: 125%; +} + +.custom-file { + position: relative; + display: inline-block; + width: 100%; + height: calc(2.25rem + 2px); + margin-bottom: 0; +} + +.custom-file-input { + position: relative; + z-index: 2; + width: 100%; + height: calc(2.25rem + 2px); + margin: 0; + opacity: 0; +} +.custom-file-input:focus ~ .custom-file-control { + border-color: #80bdff; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.custom-file-input:focus ~ .custom-file-control::before { + border-color: #80bdff; +} +.custom-file-input:lang(en) ~ .custom-file-label::after { + content: "Browse"; +} + +.custom-file-label { + position: absolute; + top: 0; + right: 0; + left: 0; + z-index: 1; + height: calc(2.25rem + 2px); + padding: 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + background-color: #fff; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} +.custom-file-label::after { + position: absolute; + top: 0; + right: 0; + bottom: 0; + z-index: 3; + display: block; + height: calc(calc(2.25rem + 2px) - 1px * 2); + padding: 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + content: "Browse"; + background-color: #e9ecef; + border-left: 1px solid #ced4da; + border-radius: 0 0.25rem 0.25rem 0; +} + +.nav { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} + +.nav-link { + display: block; + padding: 0.5rem 1rem; +} +.nav-link:hover, .nav-link:focus { + text-decoration: none; +} +.nav-link.disabled { + color: #6c757d; +} + +.nav-tabs { + border-bottom: 1px solid #dee2e6; +} +.nav-tabs .nav-item { + margin-bottom: -1px; +} +.nav-tabs .nav-link { + border: 1px solid transparent; + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.nav-tabs .nav-link:hover, .nav-tabs .nav-link:focus { + border-color: #e9ecef #e9ecef #dee2e6; +} +.nav-tabs .nav-link.disabled { + color: #6c757d; + background-color: transparent; + border-color: transparent; +} +.nav-tabs .nav-link.active, +.nav-tabs .nav-item.show .nav-link { + color: #495057; + background-color: #fff; + border-color: #dee2e6 #dee2e6 #fff; +} +.nav-tabs .dropdown-menu { + margin-top: -1px; + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.nav-pills .nav-link { + border-radius: 0.25rem; +} +.nav-pills .nav-link.active, +.nav-pills .show > .nav-link { + color: #fff; + background-color: #007bff; +} + +.nav-fill .nav-item { + -webkit-box-flex: 1; + -ms-flex: 1 1 
auto; + flex: 1 1 auto; + text-align: center; +} + +.nav-justified .nav-item { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + text-align: center; +} + +.tab-content > .tab-pane { + display: none; +} +.tab-content > .active { + display: block; +} + +.navbar { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 0.5rem 1rem; +} +.navbar > .container, +.navbar > .container-fluid { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; +} + +.navbar-brand { + display: inline-block; + padding-top: 0.3125rem; + padding-bottom: 0.3125rem; + margin-right: 1rem; + font-size: 1.25rem; + line-height: inherit; + white-space: nowrap; +} +.navbar-brand:hover, .navbar-brand:focus { + text-decoration: none; +} + +.navbar-nav { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} +.navbar-nav .nav-link { + padding-right: 0; + padding-left: 0; +} +.navbar-nav .dropdown-menu { + position: static; + float: none; +} + +.navbar-text { + display: inline-block; + padding-top: 0.5rem; + padding-bottom: 0.5rem; +} + +.navbar-collapse { + -ms-flex-preferred-size: 100%; + flex-basis: 100%; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} + +.navbar-toggler { + padding: 0.25rem 0.75rem; + font-size: 1.25rem; + line-height: 1; + background-color: transparent; + border: 1px solid transparent; + border-radius: 0.25rem; +} +.navbar-toggler:hover, .navbar-toggler:focus { + text-decoration: none; +} +.navbar-toggler:not(:disabled):not(.disabled) { + cursor: pointer; +} + +.navbar-toggler-icon { + display: inline-block; + width: 1.5em; + height: 1.5em; + vertical-align: middle; + content: ""; + background: no-repeat center center; + background-size: 100% 100%; +} + +@media (max-width: 575.98px) { + .navbar-expand-sm > .container, + .navbar-expand-sm > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 576px) { + .navbar-expand-sm { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-sm .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-sm .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-sm .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-sm .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-sm > .container, + .navbar-expand-sm > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-sm .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: 
flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-sm .navbar-toggler { + display: none; + } + .navbar-expand-sm .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 767.98px) { + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 768px) { + .navbar-expand-md { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-md .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-md .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-md .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-md .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-md .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-md .navbar-toggler { + display: none; + } + .navbar-expand-md .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 991.98px) { + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 992px) { + .navbar-expand-lg { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-lg .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-lg .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-lg .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-lg .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-lg .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-lg .navbar-toggler { + display: none; + } + .navbar-expand-lg .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 1199.98px) { + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 1200px) { + .navbar-expand-xl { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-xl .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-xl .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-xl .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-xl .navbar-nav .nav-link { + 
padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-xl .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-xl .navbar-toggler { + display: none; + } + .navbar-expand-xl .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +.navbar-expand { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; +} +.navbar-expand > .container, +.navbar-expand > .container-fluid { + padding-right: 0; + padding-left: 0; +} +.navbar-expand .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; +} +.navbar-expand .navbar-nav .dropdown-menu { + position: absolute; +} +.navbar-expand .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; +} +.navbar-expand .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; +} +.navbar-expand > .container, +.navbar-expand > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; +} +.navbar-expand .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; +} +.navbar-expand .navbar-toggler { + display: none; +} +.navbar-expand .dropup .dropdown-menu { + top: auto; + bottom: 100%; +} + +.navbar-light .navbar-brand { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-brand:hover, .navbar-light .navbar-brand:focus { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-nav .nav-link { + color: rgba(0, 0, 0, 0.5); +} +.navbar-light .navbar-nav .nav-link:hover, .navbar-light .navbar-nav .nav-link:focus { + color: rgba(0, 0, 0, 0.7); +} +.navbar-light .navbar-nav .nav-link.disabled { + color: rgba(0, 0, 0, 0.3); +} +.navbar-light .navbar-nav .show > .nav-link, +.navbar-light .navbar-nav .active > .nav-link, +.navbar-light .navbar-nav .nav-link.show, +.navbar-light .navbar-nav .nav-link.active { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-toggler { + color: rgba(0, 0, 0, 0.5); + border-color: rgba(0, 0, 0, 0.1); +} +.navbar-light .navbar-toggler-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); +} +.navbar-light .navbar-text { + color: rgba(0, 0, 0, 0.5); +} +.navbar-light .navbar-text a { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-text a:hover, .navbar-light .navbar-text a:focus { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-dark .navbar-brand { + color: #fff; +} +.navbar-dark .navbar-brand:hover, .navbar-dark .navbar-brand:focus { + color: #fff; +} +.navbar-dark .navbar-nav .nav-link { + color: rgba(255, 255, 255, 0.5); +} +.navbar-dark .navbar-nav .nav-link:hover, .navbar-dark .navbar-nav .nav-link:focus { + color: rgba(255, 255, 255, 0.75); +} +.navbar-dark .navbar-nav .nav-link.disabled { + color: rgba(255, 255, 255, 0.25); +} +.navbar-dark .navbar-nav .show > .nav-link, +.navbar-dark .navbar-nav .active > .nav-link, +.navbar-dark .navbar-nav .nav-link.show, +.navbar-dark .navbar-nav 
.nav-link.active { + color: #fff; +} +.navbar-dark .navbar-toggler { + color: rgba(255, 255, 255, 0.5); + border-color: rgba(255, 255, 255, 0.1); +} +.navbar-dark .navbar-toggler-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); +} +.navbar-dark .navbar-text { + color: rgba(255, 255, 255, 0.5); +} +.navbar-dark .navbar-text a { + color: #fff; +} +.navbar-dark .navbar-text a:hover, .navbar-dark .navbar-text a:focus { + color: #fff; +} + +.card { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + min-width: 0; + word-wrap: break-word; + background-color: #fff; + background-clip: border-box; + border: 1px solid rgba(0, 0, 0, 0.125); + border-radius: 0.25rem; +} +.card > hr { + margin-right: 0; + margin-left: 0; +} +.card > .list-group:first-child .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.card > .list-group:last-child .list-group-item:last-child { + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} + +.card-body { + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1.25rem; +} + +.card-title { + margin-bottom: 0.75rem; +} + +.card-subtitle { + margin-top: -0.375rem; + margin-bottom: 0; +} + +.card-text:last-child { + margin-bottom: 0; +} + +.card-link:hover { + text-decoration: none; +} +.card-link + .card-link { + margin-left: 1.25rem; +} + +.card-header { + padding: 0.75rem 1.25rem; + margin-bottom: 0; + background-color: rgba(0, 0, 0, 0.03); + border-bottom: 1px solid rgba(0, 0, 0, 0.125); +} +.card-header:first-child { + border-radius: calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0; +} +.card-header + .list-group .list-group-item:first-child { + border-top: 0; +} + +.card-footer { + padding: 0.75rem 1.25rem; + background-color: rgba(0, 0, 0, 0.03); + border-top: 1px solid rgba(0, 0, 0, 0.125); +} +.card-footer:last-child { + border-radius: 0 0 calc(0.25rem - 1px) calc(0.25rem - 1px); +} + +.card-header-tabs { + margin-right: -0.625rem; + margin-bottom: -0.75rem; + margin-left: -0.625rem; + border-bottom: 0; +} + +.card-header-pills { + margin-right: -0.625rem; + margin-left: -0.625rem; +} + +.card-img-overlay { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + padding: 1.25rem; +} + +.card-img { + width: 100%; + border-radius: calc(0.25rem - 1px); +} + +.card-img-top { + width: 100%; + border-top-left-radius: calc(0.25rem - 1px); + border-top-right-radius: calc(0.25rem - 1px); +} + +.card-img-bottom { + width: 100%; + border-bottom-right-radius: calc(0.25rem - 1px); + border-bottom-left-radius: calc(0.25rem - 1px); +} + +.card-deck { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; +} +.card-deck .card { + margin-bottom: 15px; +} +@media (min-width: 576px) { + .card-deck { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + margin-right: -15px; + margin-left: -15px; + } + .card-deck .card { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-flex: 1; + 
-ms-flex: 1 0 0%; + flex: 1 0 0%; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + margin-right: 15px; + margin-bottom: 0; + margin-left: 15px; + } +} + +.card-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; +} +.card-group > .card { + margin-bottom: 15px; +} +@media (min-width: 576px) { + .card-group { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + } + .card-group > .card { + -webkit-box-flex: 1; + -ms-flex: 1 0 0%; + flex: 1 0 0%; + margin-bottom: 0; + } + .card-group > .card + .card { + margin-left: 0; + border-left: 0; + } + .card-group > .card:first-child { + border-top-right-radius: 0; + border-bottom-right-radius: 0; + } + .card-group > .card:first-child .card-img-top, + .card-group > .card:first-child .card-header { + border-top-right-radius: 0; + } + .card-group > .card:first-child .card-img-bottom, + .card-group > .card:first-child .card-footer { + border-bottom-right-radius: 0; + } + .card-group > .card:last-child { + border-top-left-radius: 0; + border-bottom-left-radius: 0; + } + .card-group > .card:last-child .card-img-top, + .card-group > .card:last-child .card-header { + border-top-left-radius: 0; + } + .card-group > .card:last-child .card-img-bottom, + .card-group > .card:last-child .card-footer { + border-bottom-left-radius: 0; + } + .card-group > .card:only-child { + border-radius: 0.25rem; + } + .card-group > .card:only-child .card-img-top, + .card-group > .card:only-child .card-header { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; + } + .card-group > .card:only-child .card-img-bottom, + .card-group > .card:only-child .card-footer { + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + } + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) { + border-radius: 0; + } + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-top, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-header, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-footer { + border-radius: 0; + } +} + +.card-columns .card { + margin-bottom: 0.75rem; +} +@media (min-width: 576px) { + .card-columns { + -webkit-column-count: 3; + -moz-column-count: 3; + column-count: 3; + -webkit-column-gap: 1.25rem; + -moz-column-gap: 1.25rem; + column-gap: 1.25rem; + } + .card-columns .card { + display: inline-block; + width: 100%; + } +} + +.breadcrumb { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding: 0.75rem 1rem; + margin-bottom: 1rem; + list-style: none; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.breadcrumb-item + .breadcrumb-item::before { + display: inline-block; + padding-right: 0.5rem; + padding-left: 0.5rem; + color: #6c757d; + content: "/"; +} +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: underline; +} +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: none; +} +.breadcrumb-item.active { + color: #6c757d; +} + +.pagination { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + padding-left: 0; + list-style: none; + border-radius: 
0.25rem; +} + +.page-link { + position: relative; + display: block; + padding: 0.5rem 0.75rem; + margin-left: -1px; + line-height: 1.25; + color: #007bff; + background-color: #fff; + border: 1px solid #dee2e6; +} +.page-link:hover { + color: #0056b3; + text-decoration: none; + background-color: #e9ecef; + border-color: #dee2e6; +} +.page-link:focus { + z-index: 2; + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.page-link:not(:disabled):not(.disabled) { + cursor: pointer; +} + +.page-item:first-child .page-link { + margin-left: 0; + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} +.page-item:last-child .page-link { + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; +} +.page-item.active .page-link { + z-index: 1; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.page-item.disabled .page-link { + color: #6c757d; + pointer-events: none; + cursor: auto; + background-color: #fff; + border-color: #dee2e6; +} + +.pagination-lg .page-link { + padding: 0.75rem 1.5rem; + font-size: 1.25rem; + line-height: 1.5; +} +.pagination-lg .page-item:first-child .page-link { + border-top-left-radius: 0.3rem; + border-bottom-left-radius: 0.3rem; +} +.pagination-lg .page-item:last-child .page-link { + border-top-right-radius: 0.3rem; + border-bottom-right-radius: 0.3rem; +} + +.pagination-sm .page-link { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; +} +.pagination-sm .page-item:first-child .page-link { + border-top-left-radius: 0.2rem; + border-bottom-left-radius: 0.2rem; +} +.pagination-sm .page-item:last-child .page-link { + border-top-right-radius: 0.2rem; + border-bottom-right-radius: 0.2rem; +} + +.badge { + display: inline-block; + padding: 0.25em 0.4em; + font-size: 75%; + font-weight: 700; + line-height: 1; + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: 0.25rem; +} +.badge:empty { + display: none; +} + +.btn .badge { + position: relative; + top: -1px; +} + +.badge-pill { + padding-right: 0.6em; + padding-left: 0.6em; + border-radius: 10rem; +} + +.badge-primary { + color: #fff; + background-color: #007bff; +} +.badge-primary[href]:hover, .badge-primary[href]:focus { + color: #fff; + text-decoration: none; + background-color: #0062cc; +} + +.badge-secondary { + color: #fff; + background-color: #6c757d; +} +.badge-secondary[href]:hover, .badge-secondary[href]:focus { + color: #fff; + text-decoration: none; + background-color: #545b62; +} + +.badge-success { + color: #fff; + background-color: #28a745; +} +.badge-success[href]:hover, .badge-success[href]:focus { + color: #fff; + text-decoration: none; + background-color: #1e7e34; +} + +.badge-info { + color: #fff; + background-color: #17a2b8; +} +.badge-info[href]:hover, .badge-info[href]:focus { + color: #fff; + text-decoration: none; + background-color: #117a8b; +} + +.badge-warning { + color: #212529; + background-color: #ffc107; +} +.badge-warning[href]:hover, .badge-warning[href]:focus { + color: #212529; + text-decoration: none; + background-color: #d39e00; +} + +.badge-danger { + color: #fff; + background-color: #dc3545; +} +.badge-danger[href]:hover, .badge-danger[href]:focus { + color: #fff; + text-decoration: none; + background-color: #bd2130; +} + +.badge-light { + color: #212529; + background-color: #f8f9fa; +} +.badge-light[href]:hover, .badge-light[href]:focus { + color: #212529; + text-decoration: none; + background-color: 
#dae0e5; +} + +.badge-dark { + color: #fff; + background-color: #343a40; +} +.badge-dark[href]:hover, .badge-dark[href]:focus { + color: #fff; + text-decoration: none; + background-color: #1d2124; +} + +.jumbotron { + padding: 2rem 1rem; + margin-bottom: 2rem; + background-color: #e9ecef; + border-radius: 0.3rem; +} +@media (min-width: 576px) { + .jumbotron { + padding: 4rem 2rem; + } +} + +.jumbotron-fluid { + padding-right: 0; + padding-left: 0; + border-radius: 0; +} + +.alert { + position: relative; + padding: 0.75rem 1.25rem; + margin-bottom: 1rem; + border: 1px solid transparent; + border-radius: 0.25rem; +} + +.alert-heading { + color: inherit; +} + +.alert-link { + font-weight: 700; +} + +.alert-dismissible { + padding-right: 4rem; +} +.alert-dismissible .close { + position: absolute; + top: 0; + right: 0; + padding: 0.75rem 1.25rem; + color: inherit; +} + +.alert-primary { + color: #004085; + background-color: #cce5ff; + border-color: #b8daff; +} +.alert-primary hr { + border-top-color: #9fcdff; +} +.alert-primary .alert-link { + color: #002752; +} + +.alert-secondary { + color: #383d41; + background-color: #e2e3e5; + border-color: #d6d8db; +} +.alert-secondary hr { + border-top-color: #c8cbcf; +} +.alert-secondary .alert-link { + color: #202326; +} + +.alert-success { + color: #155724; + background-color: #d4edda; + border-color: #c3e6cb; +} +.alert-success hr { + border-top-color: #b1dfbb; +} +.alert-success .alert-link { + color: #0b2e13; +} + +.alert-info { + color: #0c5460; + background-color: #d1ecf1; + border-color: #bee5eb; +} +.alert-info hr { + border-top-color: #abdde5; +} +.alert-info .alert-link { + color: #062c33; +} + +.alert-warning { + color: #856404; + background-color: #fff3cd; + border-color: #ffeeba; +} +.alert-warning hr { + border-top-color: #ffe8a1; +} +.alert-warning .alert-link { + color: #533f03; +} + +.alert-danger { + color: #721c24; + background-color: #f8d7da; + border-color: #f5c6cb; +} +.alert-danger hr { + border-top-color: #f1b0b7; +} +.alert-danger .alert-link { + color: #491217; +} + +.alert-light { + color: #818182; + background-color: #fefefe; + border-color: #fdfdfe; +} +.alert-light hr { + border-top-color: #ececf6; +} +.alert-light .alert-link { + color: #686868; +} + +.alert-dark { + color: #1b1e21; + background-color: #d6d8d9; + border-color: #c6c8ca; +} +.alert-dark hr { + border-top-color: #b9bbbe; +} +.alert-dark .alert-link { + color: #040505; +} + +@-webkit-keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} + +@keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} +.progress { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 1rem; + overflow: hidden; + font-size: 0.75rem; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.progress-bar { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + color: #fff; + text-align: center; + background-color: #007bff; + -webkit-transition: width 0.6s ease; + transition: width 0.6s ease; +} + +.progress-bar-striped { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + 
background-size: 1rem 1rem; +} + +.progress-bar-animated { + -webkit-animation: progress-bar-stripes 1s linear infinite; + animation: progress-bar-stripes 1s linear infinite; +} + +.media { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; +} + +.media-body { + -webkit-box-flex: 1; + -ms-flex: 1; + flex: 1; +} + +.list-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; +} + +.list-group-item-action { + width: 100%; + color: #495057; + text-align: inherit; +} +.list-group-item-action:hover, .list-group-item-action:focus { + color: #495057; + text-decoration: none; + background-color: #f8f9fa; +} +.list-group-item-action:active { + color: #212529; + background-color: #e9ecef; +} + +.list-group-item { + position: relative; + display: block; + padding: 0.75rem 1.25rem; + margin-bottom: -1px; + background-color: #fff; + border: 1px solid rgba(0, 0, 0, 0.125); +} +.list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.list-group-item:last-child { + margin-bottom: 0; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} +.list-group-item:hover, .list-group-item:focus { + z-index: 1; + text-decoration: none; +} +.list-group-item.disabled, .list-group-item:disabled { + color: #6c757d; + background-color: #fff; +} +.list-group-item.active { + z-index: 2; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.list-group-flush .list-group-item { + border-right: 0; + border-left: 0; + border-radius: 0; +} +.list-group-flush:first-child .list-group-item:first-child { + border-top: 0; +} +.list-group-flush:last-child .list-group-item:last-child { + border-bottom: 0; +} + +.list-group-item-primary { + color: #004085; + background-color: #b8daff; +} +.list-group-item-primary.list-group-item-action:hover, .list-group-item-primary.list-group-item-action:focus { + color: #004085; + background-color: #9fcdff; +} +.list-group-item-primary.list-group-item-action.active { + color: #fff; + background-color: #004085; + border-color: #004085; +} + +.list-group-item-secondary { + color: #383d41; + background-color: #d6d8db; +} +.list-group-item-secondary.list-group-item-action:hover, .list-group-item-secondary.list-group-item-action:focus { + color: #383d41; + background-color: #c8cbcf; +} +.list-group-item-secondary.list-group-item-action.active { + color: #fff; + background-color: #383d41; + border-color: #383d41; +} + +.list-group-item-success { + color: #155724; + background-color: #c3e6cb; +} +.list-group-item-success.list-group-item-action:hover, .list-group-item-success.list-group-item-action:focus { + color: #155724; + background-color: #b1dfbb; +} +.list-group-item-success.list-group-item-action.active { + color: #fff; + background-color: #155724; + border-color: #155724; +} + +.list-group-item-info { + color: #0c5460; + background-color: #bee5eb; +} +.list-group-item-info.list-group-item-action:hover, .list-group-item-info.list-group-item-action:focus { + color: #0c5460; + background-color: #abdde5; +} +.list-group-item-info.list-group-item-action.active { + color: #fff; + background-color: #0c5460; + border-color: #0c5460; +} + +.list-group-item-warning { + color: #856404; + background-color: #ffeeba; +} 
+.list-group-item-warning.list-group-item-action:hover, .list-group-item-warning.list-group-item-action:focus { + color: #856404; + background-color: #ffe8a1; +} +.list-group-item-warning.list-group-item-action.active { + color: #fff; + background-color: #856404; + border-color: #856404; +} + +.list-group-item-danger { + color: #721c24; + background-color: #f5c6cb; +} +.list-group-item-danger.list-group-item-action:hover, .list-group-item-danger.list-group-item-action:focus { + color: #721c24; + background-color: #f1b0b7; +} +.list-group-item-danger.list-group-item-action.active { + color: #fff; + background-color: #721c24; + border-color: #721c24; +} + +.list-group-item-light { + color: #818182; + background-color: #fdfdfe; +} +.list-group-item-light.list-group-item-action:hover, .list-group-item-light.list-group-item-action:focus { + color: #818182; + background-color: #ececf6; +} +.list-group-item-light.list-group-item-action.active { + color: #fff; + background-color: #818182; + border-color: #818182; +} + +.list-group-item-dark { + color: #1b1e21; + background-color: #c6c8ca; +} +.list-group-item-dark.list-group-item-action:hover, .list-group-item-dark.list-group-item-action:focus { + color: #1b1e21; + background-color: #b9bbbe; +} +.list-group-item-dark.list-group-item-action.active { + color: #fff; + background-color: #1b1e21; + border-color: #1b1e21; +} + +.close { + float: right; + font-size: 1.5rem; + font-weight: 700; + line-height: 1; + color: #000; + text-shadow: 0 1px 0 #fff; + opacity: .5; +} +.close:hover, .close:focus { + color: #000; + text-decoration: none; + opacity: .75; +} +.close:not(:disabled):not(.disabled) { + cursor: pointer; +} + +button.close { + padding: 0; + background-color: transparent; + border: 0; + -webkit-appearance: none; +} + +.modal-open { + overflow: hidden; +} + +.modal { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1050; + display: none; + overflow: hidden; + outline: 0; +} +.modal-open .modal { + overflow-x: hidden; + overflow-y: auto; +} + +.modal-dialog { + position: relative; + width: auto; + margin: 0.5rem; + pointer-events: none; +} +.modal.fade .modal-dialog { + -webkit-transition: -webkit-transform 0.3s ease-out; + transition: -webkit-transform 0.3s ease-out; + transition: transform 0.3s ease-out; + transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out; + -webkit-transform: translate(0, -25%); + transform: translate(0, -25%); +} +.modal.show .modal-dialog { + -webkit-transform: translate(0, 0); + transform: translate(0, 0); +} + +.modal-dialog-centered { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + min-height: calc(100% - (0.5rem * 2)); +} + +.modal-content { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + width: 100%; + pointer-events: auto; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 0.3rem; + outline: 0; +} + +.modal-backdrop { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1040; + background-color: #000; +} +.modal-backdrop.fade { + opacity: 0; +} +.modal-backdrop.show { + opacity: 0.5; +} + +.modal-header { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: start; + -ms-flex-align: start; + 
align-items: flex-start; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 1rem; + border-bottom: 1px solid #e9ecef; + border-top-left-radius: 0.3rem; + border-top-right-radius: 0.3rem; +} +.modal-header .close { + padding: 1rem; + margin: -1rem -1rem -1rem auto; +} + +.modal-title { + margin-bottom: 0; + line-height: 1.5; +} + +.modal-body { + position: relative; + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1rem; +} + +.modal-footer { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + padding: 1rem; + border-top: 1px solid #e9ecef; +} +.modal-footer > :not(:first-child) { + margin-left: .25rem; +} +.modal-footer > :not(:last-child) { + margin-right: .25rem; +} + +.modal-scrollbar-measure { + position: absolute; + top: -9999px; + width: 50px; + height: 50px; + overflow: scroll; +} + +@media (min-width: 576px) { + .modal-dialog { + max-width: 500px; + margin: 1.75rem auto; + } + + .modal-dialog-centered { + min-height: calc(100% - (1.75rem * 2)); + } + + .modal-sm { + max-width: 300px; + } +} +@media (min-width: 992px) { + .modal-lg { + max-width: 800px; + } +} +.tooltip { + position: absolute; + z-index: 1070; + display: block; + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + opacity: 0; +} +.tooltip.show { + opacity: 0.9; +} +.tooltip .arrow { + position: absolute; + display: block; + width: 0.8rem; + height: 0.4rem; +} +.tooltip .arrow::before { + position: absolute; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-tooltip-top, .bs-tooltip-auto[x-placement^="top"] { + padding: 0.4rem 0; +} +.bs-tooltip-top .arrow, .bs-tooltip-auto[x-placement^="top"] .arrow { + bottom: 0; +} +.bs-tooltip-top .arrow::before, .bs-tooltip-auto[x-placement^="top"] .arrow::before { + top: 0; + border-width: 0.4rem 0.4rem 0; + border-top-color: #000; +} + +.bs-tooltip-right, .bs-tooltip-auto[x-placement^="right"] { + padding: 0 0.4rem; +} +.bs-tooltip-right .arrow, .bs-tooltip-auto[x-placement^="right"] .arrow { + left: 0; + width: 0.4rem; + height: 0.8rem; +} +.bs-tooltip-right .arrow::before, .bs-tooltip-auto[x-placement^="right"] .arrow::before { + right: 0; + border-width: 0.4rem 0.4rem 0.4rem 0; + border-right-color: #000; +} + +.bs-tooltip-bottom, .bs-tooltip-auto[x-placement^="bottom"] { + padding: 0.4rem 0; +} +.bs-tooltip-bottom .arrow, .bs-tooltip-auto[x-placement^="bottom"] .arrow { + top: 0; +} +.bs-tooltip-bottom .arrow::before, .bs-tooltip-auto[x-placement^="bottom"] .arrow::before { + bottom: 0; + border-width: 0 0.4rem 0.4rem; + border-bottom-color: #000; +} + +.bs-tooltip-left, .bs-tooltip-auto[x-placement^="left"] { + padding: 0 0.4rem; +} +.bs-tooltip-left .arrow, .bs-tooltip-auto[x-placement^="left"] .arrow { + right: 0; + width: 0.4rem; + height: 0.8rem; +} +.bs-tooltip-left .arrow::before, .bs-tooltip-auto[x-placement^="left"] .arrow::before { + left: 0; + 
border-width: 0.4rem 0 0.4rem 0.4rem; + border-left-color: #000; +} + +.tooltip-inner { + max-width: 200px; + padding: 0.25rem 0.5rem; + color: #fff; + text-align: center; + background-color: #000; + border-radius: 0.25rem; +} + +.popover { + position: absolute; + top: 0; + left: 0; + z-index: 1060; + display: block; + max-width: 276px; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 0.3rem; +} +.popover .arrow { + position: absolute; + display: block; + width: 1rem; + height: 0.5rem; + margin: 0 0.3rem; +} +.popover .arrow::before, .popover .arrow::after { + position: absolute; + display: block; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-popover-top, .bs-popover-auto[x-placement^="top"] { + margin-bottom: 0.5rem; +} +.bs-popover-top .arrow, .bs-popover-auto[x-placement^="top"] .arrow { + bottom: calc((0.5rem + 1px) * -1); +} +.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before, +.bs-popover-top .arrow::after, +.bs-popover-auto[x-placement^="top"] .arrow::after { + border-width: 0.5rem 0.5rem 0; +} +.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before { + bottom: 0; + border-top-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-top .arrow::after, .bs-popover-auto[x-placement^="top"] .arrow::after { + bottom: 1px; + border-top-color: #fff; +} + +.bs-popover-right, .bs-popover-auto[x-placement^="right"] { + margin-left: 0.5rem; +} +.bs-popover-right .arrow, .bs-popover-auto[x-placement^="right"] .arrow { + left: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} +.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before, +.bs-popover-right .arrow::after, +.bs-popover-auto[x-placement^="right"] .arrow::after { + border-width: 0.5rem 0.5rem 0.5rem 0; +} +.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before { + left: 0; + border-right-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-right .arrow::after, .bs-popover-auto[x-placement^="right"] .arrow::after { + left: 1px; + border-right-color: #fff; +} + +.bs-popover-bottom, .bs-popover-auto[x-placement^="bottom"] { + margin-top: 0.5rem; +} +.bs-popover-bottom .arrow, .bs-popover-auto[x-placement^="bottom"] .arrow { + top: calc((0.5rem + 1px) * -1); +} +.bs-popover-bottom .arrow::before, .bs-popover-auto[x-placement^="bottom"] .arrow::before, +.bs-popover-bottom .arrow::after, +.bs-popover-auto[x-placement^="bottom"] .arrow::after { + border-width: 0 0.5rem 0.5rem 0.5rem; +} +.bs-popover-bottom .arrow::before, .bs-popover-auto[x-placement^="bottom"] .arrow::before { + top: 0; + border-bottom-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-bottom .arrow::after, .bs-popover-auto[x-placement^="bottom"] .arrow::after { + top: 1px; + border-bottom-color: #fff; +} +.bs-popover-bottom .popover-header::before, .bs-popover-auto[x-placement^="bottom"] .popover-header::before { + position: absolute; + top: 0; + left: 50%; + 
display: block; + width: 1rem; + margin-left: -0.5rem; + content: ""; + border-bottom: 1px solid #f7f7f7; +} + +.bs-popover-left, .bs-popover-auto[x-placement^="left"] { + margin-right: 0.5rem; +} +.bs-popover-left .arrow, .bs-popover-auto[x-placement^="left"] .arrow { + right: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} +.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before, +.bs-popover-left .arrow::after, +.bs-popover-auto[x-placement^="left"] .arrow::after { + border-width: 0.5rem 0 0.5rem 0.5rem; +} +.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before { + right: 0; + border-left-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-left .arrow::after, .bs-popover-auto[x-placement^="left"] .arrow::after { + right: 1px; + border-left-color: #fff; +} + +.popover-header { + padding: 0.5rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + color: inherit; + background-color: #f7f7f7; + border-bottom: 1px solid #ebebeb; + border-top-left-radius: calc(0.3rem - 1px); + border-top-right-radius: calc(0.3rem - 1px); +} +.popover-header:empty { + display: none; +} + +.popover-body { + padding: 0.5rem 0.75rem; + color: #212529; +} + +.carousel { + position: relative; +} + +.carousel-inner { + position: relative; + width: 100%; + overflow: hidden; +} + +.carousel-item { + position: relative; + display: none; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + width: 100%; + -webkit-transition: -webkit-transform 0.6s ease; + transition: -webkit-transform 0.6s ease; + transition: transform 0.6s ease; + transition: transform 0.6s ease, -webkit-transform 0.6s ease; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + -webkit-perspective: 1000px; + perspective: 1000px; +} + +.carousel-item.active, +.carousel-item-next, +.carousel-item-prev { + display: block; +} + +.carousel-item-next, +.carousel-item-prev { + position: absolute; + top: 0; +} + +.carousel-item-next.carousel-item-left, +.carousel-item-prev.carousel-item-right { + -webkit-transform: translateX(0); + transform: translateX(0); +} +@supports (transform-style: preserve-3d) { + .carousel-item-next.carousel-item-left, + .carousel-item-prev.carousel-item-right { + -webkit-transform: translate3d(0, 0, 0); + transform: translate3d(0, 0, 0); + } +} + +.carousel-item-next, +.active.carousel-item-right { + -webkit-transform: translateX(100%); + transform: translateX(100%); +} +@supports (transform-style: preserve-3d) { + .carousel-item-next, + .active.carousel-item-right { + -webkit-transform: translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0); + } +} + +.carousel-item-prev, +.active.carousel-item-left { + -webkit-transform: translateX(-100%); + transform: translateX(-100%); +} +@supports (transform-style: preserve-3d) { + .carousel-item-prev, + .active.carousel-item-left { + -webkit-transform: translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0); + } +} + +.carousel-control-prev, +.carousel-control-next { + position: absolute; + top: 0; + bottom: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + width: 15%; + color: #fff; + text-align: center; + opacity: 0.5; +} +.carousel-control-prev:hover, .carousel-control-prev:focus, +.carousel-control-next:hover, +.carousel-control-next:focus { + color: #fff; + 
text-decoration: none; + outline: 0; + opacity: .9; +} + +.carousel-control-prev { + left: 0; +} + +.carousel-control-next { + right: 0; +} + +.carousel-control-prev-icon, +.carousel-control-next-icon { + display: inline-block; + width: 20px; + height: 20px; + background: transparent no-repeat center center; + background-size: 100% 100%; +} + +.carousel-control-prev-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E"); +} + +.carousel-control-next-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3E%3C/svg%3E"); +} + +.carousel-indicators { + position: absolute; + right: 0; + bottom: 10px; + left: 0; + z-index: 15; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + padding-left: 0; + margin-right: 15%; + margin-left: 15%; + list-style: none; +} +.carousel-indicators li { + position: relative; + -webkit-box-flex: 0; + -ms-flex: 0 1 auto; + flex: 0 1 auto; + width: 30px; + height: 3px; + margin-right: 3px; + margin-left: 3px; + text-indent: -999px; + background-color: rgba(255, 255, 255, 0.5); +} +.carousel-indicators li::before { + position: absolute; + top: -10px; + left: 0; + display: inline-block; + width: 100%; + height: 10px; + content: ""; +} +.carousel-indicators li::after { + position: absolute; + bottom: -10px; + left: 0; + display: inline-block; + width: 100%; + height: 10px; + content: ""; +} +.carousel-indicators .active { + background-color: #fff; +} + +.carousel-caption { + position: absolute; + right: 15%; + bottom: 20px; + left: 15%; + z-index: 10; + padding-top: 20px; + padding-bottom: 20px; + color: #fff; + text-align: center; +} + +.align-baseline { + vertical-align: baseline !important; +} + +.align-top { + vertical-align: top !important; +} + +.align-middle { + vertical-align: middle !important; +} + +.align-bottom { + vertical-align: bottom !important; +} + +.align-text-bottom { + vertical-align: text-bottom !important; +} + +.align-text-top { + vertical-align: text-top !important; +} + +.bg-primary { + background-color: #007bff !important; +} + +a.bg-primary:hover, a.bg-primary:focus, +button.bg-primary:hover, +button.bg-primary:focus { + background-color: #0062cc !important; +} + +.bg-secondary { + background-color: #6c757d !important; +} + +a.bg-secondary:hover, a.bg-secondary:focus, +button.bg-secondary:hover, +button.bg-secondary:focus { + background-color: #545b62 !important; +} + +.bg-success { + background-color: #28a745 !important; +} + +a.bg-success:hover, a.bg-success:focus, +button.bg-success:hover, +button.bg-success:focus { + background-color: #1e7e34 !important; +} + +.bg-info { + background-color: #17a2b8 !important; +} + +a.bg-info:hover, a.bg-info:focus, +button.bg-info:hover, +button.bg-info:focus { + background-color: #117a8b !important; +} + +.bg-warning { + background-color: #ffc107 !important; +} + +a.bg-warning:hover, a.bg-warning:focus, +button.bg-warning:hover, +button.bg-warning:focus { + background-color: #d39e00 !important; +} + +.bg-danger { + background-color: #dc3545 !important; +} + +a.bg-danger:hover, a.bg-danger:focus, +button.bg-danger:hover, +button.bg-danger:focus { + background-color: #bd2130 !important; +} + +.bg-light { + 
background-color: #f8f9fa !important; +} + +a.bg-light:hover, a.bg-light:focus, +button.bg-light:hover, +button.bg-light:focus { + background-color: #dae0e5 !important; +} + +.bg-dark { + background-color: #343a40 !important; +} + +a.bg-dark:hover, a.bg-dark:focus, +button.bg-dark:hover, +button.bg-dark:focus { + background-color: #1d2124 !important; +} + +.bg-white { + background-color: #fff !important; +} + +.bg-transparent { + background-color: transparent !important; +} + +.border { + border: 1px solid #dee2e6 !important; +} + +.border-top { + border-top: 1px solid #dee2e6 !important; +} + +.border-right { + border-right: 1px solid #dee2e6 !important; +} + +.border-bottom { + border-bottom: 1px solid #dee2e6 !important; +} + +.border-left { + border-left: 1px solid #dee2e6 !important; +} + +.border-0 { + border: 0 !important; +} + +.border-top-0 { + border-top: 0 !important; +} + +.border-right-0 { + border-right: 0 !important; +} + +.border-bottom-0 { + border-bottom: 0 !important; +} + +.border-left-0 { + border-left: 0 !important; +} + +.border-primary { + border-color: #007bff !important; +} + +.border-secondary { + border-color: #6c757d !important; +} + +.border-success { + border-color: #28a745 !important; +} + +.border-info { + border-color: #17a2b8 !important; +} + +.border-warning { + border-color: #ffc107 !important; +} + +.border-danger { + border-color: #dc3545 !important; +} + +.border-light { + border-color: #f8f9fa !important; +} + +.border-dark { + border-color: #343a40 !important; +} + +.border-white { + border-color: #fff !important; +} + +.rounded { + border-radius: 0.25rem !important; +} + +.rounded-top { + border-top-left-radius: 0.25rem !important; + border-top-right-radius: 0.25rem !important; +} + +.rounded-right { + border-top-right-radius: 0.25rem !important; + border-bottom-right-radius: 0.25rem !important; +} + +.rounded-bottom { + border-bottom-right-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-left { + border-top-left-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-circle { + border-radius: 50% !important; +} + +.rounded-0 { + border-radius: 0 !important; +} + +.clearfix::after { + display: block; + clear: both; + content: ""; +} + +.d-none { + display: none !important; +} + +.d-inline { + display: inline !important; +} + +.d-inline-block { + display: inline-block !important; +} + +.d-block { + display: block !important; +} + +.d-table { + display: table !important; +} + +.d-table-row { + display: table-row !important; +} + +.d-table-cell { + display: table-cell !important; +} + +.d-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; +} + +.d-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; +} + +@media (min-width: 576px) { + .d-sm-none { + display: none !important; + } + + .d-sm-inline { + display: inline !important; + } + + .d-sm-inline-block { + display: inline-block !important; + } + + .d-sm-block { + display: block !important; + } + + .d-sm-table { + display: table !important; + } + + .d-sm-table-row { + display: table-row !important; + } + + .d-sm-table-cell { + display: table-cell !important; + } + + .d-sm-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-sm-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + 
display: inline-flex !important; + } +} +@media (min-width: 768px) { + .d-md-none { + display: none !important; + } + + .d-md-inline { + display: inline !important; + } + + .d-md-inline-block { + display: inline-block !important; + } + + .d-md-block { + display: block !important; + } + + .d-md-table { + display: table !important; + } + + .d-md-table-row { + display: table-row !important; + } + + .d-md-table-cell { + display: table-cell !important; + } + + .d-md-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-md-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media (min-width: 992px) { + .d-lg-none { + display: none !important; + } + + .d-lg-inline { + display: inline !important; + } + + .d-lg-inline-block { + display: inline-block !important; + } + + .d-lg-block { + display: block !important; + } + + .d-lg-table { + display: table !important; + } + + .d-lg-table-row { + display: table-row !important; + } + + .d-lg-table-cell { + display: table-cell !important; + } + + .d-lg-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-lg-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media (min-width: 1200px) { + .d-xl-none { + display: none !important; + } + + .d-xl-inline { + display: inline !important; + } + + .d-xl-inline-block { + display: inline-block !important; + } + + .d-xl-block { + display: block !important; + } + + .d-xl-table { + display: table !important; + } + + .d-xl-table-row { + display: table-row !important; + } + + .d-xl-table-cell { + display: table-cell !important; + } + + .d-xl-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-xl-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media print { + .d-print-none { + display: none !important; + } + + .d-print-inline { + display: inline !important; + } + + .d-print-inline-block { + display: inline-block !important; + } + + .d-print-block { + display: block !important; + } + + .d-print-table { + display: table !important; + } + + .d-print-table-row { + display: table-row !important; + } + + .d-print-table-cell { + display: table-cell !important; + } + + .d-print-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-print-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +.embed-responsive { + position: relative; + display: block; + width: 100%; + padding: 0; + overflow: hidden; +} +.embed-responsive::before { + display: block; + content: ""; +} +.embed-responsive .embed-responsive-item, +.embed-responsive iframe, +.embed-responsive embed, +.embed-responsive object, +.embed-responsive video { + position: absolute; + top: 0; + bottom: 0; + left: 0; + width: 100%; + height: 100%; + border: 0; +} + +.embed-responsive-21by9::before { + padding-top: 42.8571428571%; +} + +.embed-responsive-16by9::before { + padding-top: 56.25%; +} + +.embed-responsive-4by3::before { + padding-top: 75%; +} + +.embed-responsive-1by1::before { + padding-top: 100%; +} + +.flex-row { + -webkit-box-orient: horizontal !important; + 
-webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; +} + +.flex-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; +} + +.flex-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; +} + +.flex-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; +} + +.flex-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; +} + +.flex-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; +} + +.flex-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; +} + +.justify-content-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; +} + +.justify-content-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; +} + +.justify-content-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; +} + +.justify-content-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; +} + +.justify-content-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; +} + +.align-items-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; +} + +.align-items-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; +} + +.align-items-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; +} + +.align-items-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; +} + +.align-items-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; +} + +.align-content-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; +} + +.align-content-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; +} + +.align-content-center { + -ms-flex-line-pack: center !important; + align-content: center !important; +} + +.align-content-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; +} + +.align-content-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; +} + +.align-content-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; +} + +.align-self-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; +} + +.align-self-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; +} + +.align-self-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; +} + +.align-self-center { + -ms-flex-item-align: center !important; + align-self: center !important; +} + +.align-self-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline 
!important; +} + +.align-self-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; +} + +@media (min-width: 576px) { + .flex-sm-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-sm-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-sm-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-sm-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-sm-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-sm-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-sm-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-sm-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-sm-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-sm-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-sm-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-sm-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-sm-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-sm-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-sm-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-sm-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-sm-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-sm-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-sm-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-sm-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-sm-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-sm-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-sm-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-sm-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-sm-start { + -ms-flex-item-align: start !important; 
+ align-self: flex-start !important; + } + + .align-self-sm-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-sm-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-sm-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-sm-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 768px) { + .flex-md-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-md-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-md-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-md-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-md-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-md-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-md-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-md-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-md-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-md-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-md-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-md-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-md-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-md-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-md-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-md-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-md-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-md-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-md-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-md-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-md-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-md-around { + 
-ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-md-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-md-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-md-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-md-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-md-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-md-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-md-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 992px) { + .flex-lg-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-lg-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-lg-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-lg-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-lg-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-lg-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-lg-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-lg-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-lg-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-lg-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-lg-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-lg-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-lg-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-lg-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-lg-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-lg-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-lg-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-lg-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + 
.align-content-lg-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-lg-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-lg-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-lg-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-lg-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-lg-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-lg-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-lg-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-lg-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-lg-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-lg-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 1200px) { + .flex-xl-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-xl-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-xl-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-xl-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-xl-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-xl-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-xl-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-xl-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-xl-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-xl-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-xl-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-xl-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-xl-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-xl-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-xl-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-xl-baseline { + -webkit-box-align: baseline 
!important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-xl-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-xl-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-xl-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-xl-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-xl-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-xl-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-xl-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-xl-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-xl-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-xl-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-xl-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-xl-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-xl-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +.float-left { + float: left !important; +} + +.float-right { + float: right !important; +} + +.float-none { + float: none !important; +} + +@media (min-width: 576px) { + .float-sm-left { + float: left !important; + } + + .float-sm-right { + float: right !important; + } + + .float-sm-none { + float: none !important; + } +} +@media (min-width: 768px) { + .float-md-left { + float: left !important; + } + + .float-md-right { + float: right !important; + } + + .float-md-none { + float: none !important; + } +} +@media (min-width: 992px) { + .float-lg-left { + float: left !important; + } + + .float-lg-right { + float: right !important; + } + + .float-lg-none { + float: none !important; + } +} +@media (min-width: 1200px) { + .float-xl-left { + float: left !important; + } + + .float-xl-right { + float: right !important; + } + + .float-xl-none { + float: none !important; + } +} +.position-static { + position: static !important; +} + +.position-relative { + position: relative !important; +} + +.position-absolute { + position: absolute !important; +} + +.position-fixed { + position: fixed !important; +} + +.position-sticky { + position: sticky !important; +} + +.fixed-top { + position: fixed; + top: 0; + right: 0; + left: 0; + z-index: 1030; +} + +.fixed-bottom { + position: fixed; + right: 0; + bottom: 0; + left: 0; + z-index: 1030; +} + +@supports (position: sticky) { + .sticky-top { + position: sticky; + top: 0; + z-index: 1020; + } +} + +.sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + -webkit-clip-path: inset(50%); + clip-path: inset(50%); + border: 0; +} + +.sr-only-focusable:active, .sr-only-focusable:focus { + position: static; + width: auto; + height: auto; + overflow: visible; + clip: auto; + white-space: normal; + -webkit-clip-path: none; + clip-path: none; +} + +.w-25 { + width: 25% !important; +} + +.w-50 { + width: 50% !important; +} + +.w-75 { 
+ width: 75% !important; +} + +.w-100 { + width: 100% !important; +} + +.h-25 { + height: 25% !important; +} + +.h-50 { + height: 50% !important; +} + +.h-75 { + height: 75% !important; +} + +.h-100 { + height: 100% !important; +} + +.mw-100 { + max-width: 100% !important; +} + +.mh-100 { + max-height: 100% !important; +} + +.m-0 { + margin: 0 !important; +} + +.mt-0, +.my-0 { + margin-top: 0 !important; +} + +.mr-0, +.mx-0 { + margin-right: 0 !important; +} + +.mb-0, +.my-0 { + margin-bottom: 0 !important; +} + +.ml-0, +.mx-0 { + margin-left: 0 !important; +} + +.m-1 { + margin: 0.25rem !important; +} + +.mt-1, +.my-1 { + margin-top: 0.25rem !important; +} + +.mr-1, +.mx-1 { + margin-right: 0.25rem !important; +} + +.mb-1, +.my-1 { + margin-bottom: 0.25rem !important; +} + +.ml-1, +.mx-1 { + margin-left: 0.25rem !important; +} + +.m-2 { + margin: 0.5rem !important; +} + +.mt-2, +.my-2 { + margin-top: 0.5rem !important; +} + +.mr-2, +.mx-2 { + margin-right: 0.5rem !important; +} + +.mb-2, +.my-2 { + margin-bottom: 0.5rem !important; +} + +.ml-2, +.mx-2 { + margin-left: 0.5rem !important; +} + +.m-3 { + margin: 1rem !important; +} + +.mt-3, +.my-3 { + margin-top: 1rem !important; +} + +.mr-3, +.mx-3 { + margin-right: 1rem !important; +} + +.mb-3, +.my-3 { + margin-bottom: 1rem !important; +} + +.ml-3, +.mx-3 { + margin-left: 1rem !important; +} + +.m-4 { + margin: 1.5rem !important; +} + +.mt-4, +.my-4 { + margin-top: 1.5rem !important; +} + +.mr-4, +.mx-4 { + margin-right: 1.5rem !important; +} + +.mb-4, +.my-4 { + margin-bottom: 1.5rem !important; +} + +.ml-4, +.mx-4 { + margin-left: 1.5rem !important; +} + +.m-5 { + margin: 3rem !important; +} + +.mt-5, +.my-5 { + margin-top: 3rem !important; +} + +.mr-5, +.mx-5 { + margin-right: 3rem !important; +} + +.mb-5, +.my-5 { + margin-bottom: 3rem !important; +} + +.ml-5, +.mx-5 { + margin-left: 3rem !important; +} + +.p-0 { + padding: 0 !important; +} + +.pt-0, +.py-0 { + padding-top: 0 !important; +} + +.pr-0, +.px-0 { + padding-right: 0 !important; +} + +.pb-0, +.py-0 { + padding-bottom: 0 !important; +} + +.pl-0, +.px-0 { + padding-left: 0 !important; +} + +.p-1 { + padding: 0.25rem !important; +} + +.pt-1, +.py-1 { + padding-top: 0.25rem !important; +} + +.pr-1, +.px-1 { + padding-right: 0.25rem !important; +} + +.pb-1, +.py-1 { + padding-bottom: 0.25rem !important; +} + +.pl-1, +.px-1 { + padding-left: 0.25rem !important; +} + +.p-2 { + padding: 0.5rem !important; +} + +.pt-2, +.py-2 { + padding-top: 0.5rem !important; +} + +.pr-2, +.px-2 { + padding-right: 0.5rem !important; +} + +.pb-2, +.py-2 { + padding-bottom: 0.5rem !important; +} + +.pl-2, +.px-2 { + padding-left: 0.5rem !important; +} + +.p-3 { + padding: 1rem !important; +} + +.pt-3, +.py-3 { + padding-top: 1rem !important; +} + +.pr-3, +.px-3 { + padding-right: 1rem !important; +} + +.pb-3, +.py-3 { + padding-bottom: 1rem !important; +} + +.pl-3, +.px-3 { + padding-left: 1rem !important; +} + +.p-4 { + padding: 1.5rem !important; +} + +.pt-4, +.py-4 { + padding-top: 1.5rem !important; +} + +.pr-4, +.px-4 { + padding-right: 1.5rem !important; +} + +.pb-4, +.py-4 { + padding-bottom: 1.5rem !important; +} + +.pl-4, +.px-4 { + padding-left: 1.5rem !important; +} + +.p-5 { + padding: 3rem !important; +} + +.pt-5, +.py-5 { + padding-top: 3rem !important; +} + +.pr-5, +.px-5 { + padding-right: 3rem !important; +} + +.pb-5, +.py-5 { + padding-bottom: 3rem !important; +} + +.pl-5, +.px-5 { + padding-left: 3rem !important; +} + +.m-auto { + margin: auto !important; +} + +.mt-auto, 
+.my-auto { + margin-top: auto !important; +} + +.mr-auto, +.mx-auto { + margin-right: auto !important; +} + +.mb-auto, +.my-auto { + margin-bottom: auto !important; +} + +.ml-auto, +.mx-auto { + margin-left: auto !important; +} + +@media (min-width: 576px) { + .m-sm-0 { + margin: 0 !important; + } + + .mt-sm-0, + .my-sm-0 { + margin-top: 0 !important; + } + + .mr-sm-0, + .mx-sm-0 { + margin-right: 0 !important; + } + + .mb-sm-0, + .my-sm-0 { + margin-bottom: 0 !important; + } + + .ml-sm-0, + .mx-sm-0 { + margin-left: 0 !important; + } + + .m-sm-1 { + margin: 0.25rem !important; + } + + .mt-sm-1, + .my-sm-1 { + margin-top: 0.25rem !important; + } + + .mr-sm-1, + .mx-sm-1 { + margin-right: 0.25rem !important; + } + + .mb-sm-1, + .my-sm-1 { + margin-bottom: 0.25rem !important; + } + + .ml-sm-1, + .mx-sm-1 { + margin-left: 0.25rem !important; + } + + .m-sm-2 { + margin: 0.5rem !important; + } + + .mt-sm-2, + .my-sm-2 { + margin-top: 0.5rem !important; + } + + .mr-sm-2, + .mx-sm-2 { + margin-right: 0.5rem !important; + } + + .mb-sm-2, + .my-sm-2 { + margin-bottom: 0.5rem !important; + } + + .ml-sm-2, + .mx-sm-2 { + margin-left: 0.5rem !important; + } + + .m-sm-3 { + margin: 1rem !important; + } + + .mt-sm-3, + .my-sm-3 { + margin-top: 1rem !important; + } + + .mr-sm-3, + .mx-sm-3 { + margin-right: 1rem !important; + } + + .mb-sm-3, + .my-sm-3 { + margin-bottom: 1rem !important; + } + + .ml-sm-3, + .mx-sm-3 { + margin-left: 1rem !important; + } + + .m-sm-4 { + margin: 1.5rem !important; + } + + .mt-sm-4, + .my-sm-4 { + margin-top: 1.5rem !important; + } + + .mr-sm-4, + .mx-sm-4 { + margin-right: 1.5rem !important; + } + + .mb-sm-4, + .my-sm-4 { + margin-bottom: 1.5rem !important; + } + + .ml-sm-4, + .mx-sm-4 { + margin-left: 1.5rem !important; + } + + .m-sm-5 { + margin: 3rem !important; + } + + .mt-sm-5, + .my-sm-5 { + margin-top: 3rem !important; + } + + .mr-sm-5, + .mx-sm-5 { + margin-right: 3rem !important; + } + + .mb-sm-5, + .my-sm-5 { + margin-bottom: 3rem !important; + } + + .ml-sm-5, + .mx-sm-5 { + margin-left: 3rem !important; + } + + .p-sm-0 { + padding: 0 !important; + } + + .pt-sm-0, + .py-sm-0 { + padding-top: 0 !important; + } + + .pr-sm-0, + .px-sm-0 { + padding-right: 0 !important; + } + + .pb-sm-0, + .py-sm-0 { + padding-bottom: 0 !important; + } + + .pl-sm-0, + .px-sm-0 { + padding-left: 0 !important; + } + + .p-sm-1 { + padding: 0.25rem !important; + } + + .pt-sm-1, + .py-sm-1 { + padding-top: 0.25rem !important; + } + + .pr-sm-1, + .px-sm-1 { + padding-right: 0.25rem !important; + } + + .pb-sm-1, + .py-sm-1 { + padding-bottom: 0.25rem !important; + } + + .pl-sm-1, + .px-sm-1 { + padding-left: 0.25rem !important; + } + + .p-sm-2 { + padding: 0.5rem !important; + } + + .pt-sm-2, + .py-sm-2 { + padding-top: 0.5rem !important; + } + + .pr-sm-2, + .px-sm-2 { + padding-right: 0.5rem !important; + } + + .pb-sm-2, + .py-sm-2 { + padding-bottom: 0.5rem !important; + } + + .pl-sm-2, + .px-sm-2 { + padding-left: 0.5rem !important; + } + + .p-sm-3 { + padding: 1rem !important; + } + + .pt-sm-3, + .py-sm-3 { + padding-top: 1rem !important; + } + + .pr-sm-3, + .px-sm-3 { + padding-right: 1rem !important; + } + + .pb-sm-3, + .py-sm-3 { + padding-bottom: 1rem !important; + } + + .pl-sm-3, + .px-sm-3 { + padding-left: 1rem !important; + } + + .p-sm-4 { + padding: 1.5rem !important; + } + + .pt-sm-4, + .py-sm-4 { + padding-top: 1.5rem !important; + } + + .pr-sm-4, + .px-sm-4 { + padding-right: 1.5rem !important; + } + + .pb-sm-4, + .py-sm-4 { + padding-bottom: 1.5rem !important; + } + + 
.pl-sm-4, + .px-sm-4 { + padding-left: 1.5rem !important; + } + + .p-sm-5 { + padding: 3rem !important; + } + + .pt-sm-5, + .py-sm-5 { + padding-top: 3rem !important; + } + + .pr-sm-5, + .px-sm-5 { + padding-right: 3rem !important; + } + + .pb-sm-5, + .py-sm-5 { + padding-bottom: 3rem !important; + } + + .pl-sm-5, + .px-sm-5 { + padding-left: 3rem !important; + } + + .m-sm-auto { + margin: auto !important; + } + + .mt-sm-auto, + .my-sm-auto { + margin-top: auto !important; + } + + .mr-sm-auto, + .mx-sm-auto { + margin-right: auto !important; + } + + .mb-sm-auto, + .my-sm-auto { + margin-bottom: auto !important; + } + + .ml-sm-auto, + .mx-sm-auto { + margin-left: auto !important; + } +} +@media (min-width: 768px) { + .m-md-0 { + margin: 0 !important; + } + + .mt-md-0, + .my-md-0 { + margin-top: 0 !important; + } + + .mr-md-0, + .mx-md-0 { + margin-right: 0 !important; + } + + .mb-md-0, + .my-md-0 { + margin-bottom: 0 !important; + } + + .ml-md-0, + .mx-md-0 { + margin-left: 0 !important; + } + + .m-md-1 { + margin: 0.25rem !important; + } + + .mt-md-1, + .my-md-1 { + margin-top: 0.25rem !important; + } + + .mr-md-1, + .mx-md-1 { + margin-right: 0.25rem !important; + } + + .mb-md-1, + .my-md-1 { + margin-bottom: 0.25rem !important; + } + + .ml-md-1, + .mx-md-1 { + margin-left: 0.25rem !important; + } + + .m-md-2 { + margin: 0.5rem !important; + } + + .mt-md-2, + .my-md-2 { + margin-top: 0.5rem !important; + } + + .mr-md-2, + .mx-md-2 { + margin-right: 0.5rem !important; + } + + .mb-md-2, + .my-md-2 { + margin-bottom: 0.5rem !important; + } + + .ml-md-2, + .mx-md-2 { + margin-left: 0.5rem !important; + } + + .m-md-3 { + margin: 1rem !important; + } + + .mt-md-3, + .my-md-3 { + margin-top: 1rem !important; + } + + .mr-md-3, + .mx-md-3 { + margin-right: 1rem !important; + } + + .mb-md-3, + .my-md-3 { + margin-bottom: 1rem !important; + } + + .ml-md-3, + .mx-md-3 { + margin-left: 1rem !important; + } + + .m-md-4 { + margin: 1.5rem !important; + } + + .mt-md-4, + .my-md-4 { + margin-top: 1.5rem !important; + } + + .mr-md-4, + .mx-md-4 { + margin-right: 1.5rem !important; + } + + .mb-md-4, + .my-md-4 { + margin-bottom: 1.5rem !important; + } + + .ml-md-4, + .mx-md-4 { + margin-left: 1.5rem !important; + } + + .m-md-5 { + margin: 3rem !important; + } + + .mt-md-5, + .my-md-5 { + margin-top: 3rem !important; + } + + .mr-md-5, + .mx-md-5 { + margin-right: 3rem !important; + } + + .mb-md-5, + .my-md-5 { + margin-bottom: 3rem !important; + } + + .ml-md-5, + .mx-md-5 { + margin-left: 3rem !important; + } + + .p-md-0 { + padding: 0 !important; + } + + .pt-md-0, + .py-md-0 { + padding-top: 0 !important; + } + + .pr-md-0, + .px-md-0 { + padding-right: 0 !important; + } + + .pb-md-0, + .py-md-0 { + padding-bottom: 0 !important; + } + + .pl-md-0, + .px-md-0 { + padding-left: 0 !important; + } + + .p-md-1 { + padding: 0.25rem !important; + } + + .pt-md-1, + .py-md-1 { + padding-top: 0.25rem !important; + } + + .pr-md-1, + .px-md-1 { + padding-right: 0.25rem !important; + } + + .pb-md-1, + .py-md-1 { + padding-bottom: 0.25rem !important; + } + + .pl-md-1, + .px-md-1 { + padding-left: 0.25rem !important; + } + + .p-md-2 { + padding: 0.5rem !important; + } + + .pt-md-2, + .py-md-2 { + padding-top: 0.5rem !important; + } + + .pr-md-2, + .px-md-2 { + padding-right: 0.5rem !important; + } + + .pb-md-2, + .py-md-2 { + padding-bottom: 0.5rem !important; + } + + .pl-md-2, + .px-md-2 { + padding-left: 0.5rem !important; + } + + .p-md-3 { + padding: 1rem !important; + } + + .pt-md-3, + .py-md-3 { + padding-top: 1rem 
!important; + } + + .pr-md-3, + .px-md-3 { + padding-right: 1rem !important; + } + + .pb-md-3, + .py-md-3 { + padding-bottom: 1rem !important; + } + + .pl-md-3, + .px-md-3 { + padding-left: 1rem !important; + } + + .p-md-4 { + padding: 1.5rem !important; + } + + .pt-md-4, + .py-md-4 { + padding-top: 1.5rem !important; + } + + .pr-md-4, + .px-md-4 { + padding-right: 1.5rem !important; + } + + .pb-md-4, + .py-md-4 { + padding-bottom: 1.5rem !important; + } + + .pl-md-4, + .px-md-4 { + padding-left: 1.5rem !important; + } + + .p-md-5 { + padding: 3rem !important; + } + + .pt-md-5, + .py-md-5 { + padding-top: 3rem !important; + } + + .pr-md-5, + .px-md-5 { + padding-right: 3rem !important; + } + + .pb-md-5, + .py-md-5 { + padding-bottom: 3rem !important; + } + + .pl-md-5, + .px-md-5 { + padding-left: 3rem !important; + } + + .m-md-auto { + margin: auto !important; + } + + .mt-md-auto, + .my-md-auto { + margin-top: auto !important; + } + + .mr-md-auto, + .mx-md-auto { + margin-right: auto !important; + } + + .mb-md-auto, + .my-md-auto { + margin-bottom: auto !important; + } + + .ml-md-auto, + .mx-md-auto { + margin-left: auto !important; + } +} +@media (min-width: 992px) { + .m-lg-0 { + margin: 0 !important; + } + + .mt-lg-0, + .my-lg-0 { + margin-top: 0 !important; + } + + .mr-lg-0, + .mx-lg-0 { + margin-right: 0 !important; + } + + .mb-lg-0, + .my-lg-0 { + margin-bottom: 0 !important; + } + + .ml-lg-0, + .mx-lg-0 { + margin-left: 0 !important; + } + + .m-lg-1 { + margin: 0.25rem !important; + } + + .mt-lg-1, + .my-lg-1 { + margin-top: 0.25rem !important; + } + + .mr-lg-1, + .mx-lg-1 { + margin-right: 0.25rem !important; + } + + .mb-lg-1, + .my-lg-1 { + margin-bottom: 0.25rem !important; + } + + .ml-lg-1, + .mx-lg-1 { + margin-left: 0.25rem !important; + } + + .m-lg-2 { + margin: 0.5rem !important; + } + + .mt-lg-2, + .my-lg-2 { + margin-top: 0.5rem !important; + } + + .mr-lg-2, + .mx-lg-2 { + margin-right: 0.5rem !important; + } + + .mb-lg-2, + .my-lg-2 { + margin-bottom: 0.5rem !important; + } + + .ml-lg-2, + .mx-lg-2 { + margin-left: 0.5rem !important; + } + + .m-lg-3 { + margin: 1rem !important; + } + + .mt-lg-3, + .my-lg-3 { + margin-top: 1rem !important; + } + + .mr-lg-3, + .mx-lg-3 { + margin-right: 1rem !important; + } + + .mb-lg-3, + .my-lg-3 { + margin-bottom: 1rem !important; + } + + .ml-lg-3, + .mx-lg-3 { + margin-left: 1rem !important; + } + + .m-lg-4 { + margin: 1.5rem !important; + } + + .mt-lg-4, + .my-lg-4 { + margin-top: 1.5rem !important; + } + + .mr-lg-4, + .mx-lg-4 { + margin-right: 1.5rem !important; + } + + .mb-lg-4, + .my-lg-4 { + margin-bottom: 1.5rem !important; + } + + .ml-lg-4, + .mx-lg-4 { + margin-left: 1.5rem !important; + } + + .m-lg-5 { + margin: 3rem !important; + } + + .mt-lg-5, + .my-lg-5 { + margin-top: 3rem !important; + } + + .mr-lg-5, + .mx-lg-5 { + margin-right: 3rem !important; + } + + .mb-lg-5, + .my-lg-5 { + margin-bottom: 3rem !important; + } + + .ml-lg-5, + .mx-lg-5 { + margin-left: 3rem !important; + } + + .p-lg-0 { + padding: 0 !important; + } + + .pt-lg-0, + .py-lg-0 { + padding-top: 0 !important; + } + + .pr-lg-0, + .px-lg-0 { + padding-right: 0 !important; + } + + .pb-lg-0, + .py-lg-0 { + padding-bottom: 0 !important; + } + + .pl-lg-0, + .px-lg-0 { + padding-left: 0 !important; + } + + .p-lg-1 { + padding: 0.25rem !important; + } + + .pt-lg-1, + .py-lg-1 { + padding-top: 0.25rem !important; + } + + .pr-lg-1, + .px-lg-1 { + padding-right: 0.25rem !important; + } + + .pb-lg-1, + .py-lg-1 { + padding-bottom: 0.25rem !important; + } + + .pl-lg-1, 
+ .px-lg-1 { + padding-left: 0.25rem !important; + } + + .p-lg-2 { + padding: 0.5rem !important; + } + + .pt-lg-2, + .py-lg-2 { + padding-top: 0.5rem !important; + } + + .pr-lg-2, + .px-lg-2 { + padding-right: 0.5rem !important; + } + + .pb-lg-2, + .py-lg-2 { + padding-bottom: 0.5rem !important; + } + + .pl-lg-2, + .px-lg-2 { + padding-left: 0.5rem !important; + } + + .p-lg-3 { + padding: 1rem !important; + } + + .pt-lg-3, + .py-lg-3 { + padding-top: 1rem !important; + } + + .pr-lg-3, + .px-lg-3 { + padding-right: 1rem !important; + } + + .pb-lg-3, + .py-lg-3 { + padding-bottom: 1rem !important; + } + + .pl-lg-3, + .px-lg-3 { + padding-left: 1rem !important; + } + + .p-lg-4 { + padding: 1.5rem !important; + } + + .pt-lg-4, + .py-lg-4 { + padding-top: 1.5rem !important; + } + + .pr-lg-4, + .px-lg-4 { + padding-right: 1.5rem !important; + } + + .pb-lg-4, + .py-lg-4 { + padding-bottom: 1.5rem !important; + } + + .pl-lg-4, + .px-lg-4 { + padding-left: 1.5rem !important; + } + + .p-lg-5 { + padding: 3rem !important; + } + + .pt-lg-5, + .py-lg-5 { + padding-top: 3rem !important; + } + + .pr-lg-5, + .px-lg-5 { + padding-right: 3rem !important; + } + + .pb-lg-5, + .py-lg-5 { + padding-bottom: 3rem !important; + } + + .pl-lg-5, + .px-lg-5 { + padding-left: 3rem !important; + } + + .m-lg-auto { + margin: auto !important; + } + + .mt-lg-auto, + .my-lg-auto { + margin-top: auto !important; + } + + .mr-lg-auto, + .mx-lg-auto { + margin-right: auto !important; + } + + .mb-lg-auto, + .my-lg-auto { + margin-bottom: auto !important; + } + + .ml-lg-auto, + .mx-lg-auto { + margin-left: auto !important; + } +} +@media (min-width: 1200px) { + .m-xl-0 { + margin: 0 !important; + } + + .mt-xl-0, + .my-xl-0 { + margin-top: 0 !important; + } + + .mr-xl-0, + .mx-xl-0 { + margin-right: 0 !important; + } + + .mb-xl-0, + .my-xl-0 { + margin-bottom: 0 !important; + } + + .ml-xl-0, + .mx-xl-0 { + margin-left: 0 !important; + } + + .m-xl-1 { + margin: 0.25rem !important; + } + + .mt-xl-1, + .my-xl-1 { + margin-top: 0.25rem !important; + } + + .mr-xl-1, + .mx-xl-1 { + margin-right: 0.25rem !important; + } + + .mb-xl-1, + .my-xl-1 { + margin-bottom: 0.25rem !important; + } + + .ml-xl-1, + .mx-xl-1 { + margin-left: 0.25rem !important; + } + + .m-xl-2 { + margin: 0.5rem !important; + } + + .mt-xl-2, + .my-xl-2 { + margin-top: 0.5rem !important; + } + + .mr-xl-2, + .mx-xl-2 { + margin-right: 0.5rem !important; + } + + .mb-xl-2, + .my-xl-2 { + margin-bottom: 0.5rem !important; + } + + .ml-xl-2, + .mx-xl-2 { + margin-left: 0.5rem !important; + } + + .m-xl-3 { + margin: 1rem !important; + } + + .mt-xl-3, + .my-xl-3 { + margin-top: 1rem !important; + } + + .mr-xl-3, + .mx-xl-3 { + margin-right: 1rem !important; + } + + .mb-xl-3, + .my-xl-3 { + margin-bottom: 1rem !important; + } + + .ml-xl-3, + .mx-xl-3 { + margin-left: 1rem !important; + } + + .m-xl-4 { + margin: 1.5rem !important; + } + + .mt-xl-4, + .my-xl-4 { + margin-top: 1.5rem !important; + } + + .mr-xl-4, + .mx-xl-4 { + margin-right: 1.5rem !important; + } + + .mb-xl-4, + .my-xl-4 { + margin-bottom: 1.5rem !important; + } + + .ml-xl-4, + .mx-xl-4 { + margin-left: 1.5rem !important; + } + + .m-xl-5 { + margin: 3rem !important; + } + + .mt-xl-5, + .my-xl-5 { + margin-top: 3rem !important; + } + + .mr-xl-5, + .mx-xl-5 { + margin-right: 3rem !important; + } + + .mb-xl-5, + .my-xl-5 { + margin-bottom: 3rem !important; + } + + .ml-xl-5, + .mx-xl-5 { + margin-left: 3rem !important; + } + + .p-xl-0 { + padding: 0 !important; + } + + .pt-xl-0, + .py-xl-0 { + padding-top: 0 
!important; + } + + .pr-xl-0, + .px-xl-0 { + padding-right: 0 !important; + } + + .pb-xl-0, + .py-xl-0 { + padding-bottom: 0 !important; + } + + .pl-xl-0, + .px-xl-0 { + padding-left: 0 !important; + } + + .p-xl-1 { + padding: 0.25rem !important; + } + + .pt-xl-1, + .py-xl-1 { + padding-top: 0.25rem !important; + } + + .pr-xl-1, + .px-xl-1 { + padding-right: 0.25rem !important; + } + + .pb-xl-1, + .py-xl-1 { + padding-bottom: 0.25rem !important; + } + + .pl-xl-1, + .px-xl-1 { + padding-left: 0.25rem !important; + } + + .p-xl-2 { + padding: 0.5rem !important; + } + + .pt-xl-2, + .py-xl-2 { + padding-top: 0.5rem !important; + } + + .pr-xl-2, + .px-xl-2 { + padding-right: 0.5rem !important; + } + + .pb-xl-2, + .py-xl-2 { + padding-bottom: 0.5rem !important; + } + + .pl-xl-2, + .px-xl-2 { + padding-left: 0.5rem !important; + } + + .p-xl-3 { + padding: 1rem !important; + } + + .pt-xl-3, + .py-xl-3 { + padding-top: 1rem !important; + } + + .pr-xl-3, + .px-xl-3 { + padding-right: 1rem !important; + } + + .pb-xl-3, + .py-xl-3 { + padding-bottom: 1rem !important; + } + + .pl-xl-3, + .px-xl-3 { + padding-left: 1rem !important; + } + + .p-xl-4 { + padding: 1.5rem !important; + } + + .pt-xl-4, + .py-xl-4 { + padding-top: 1.5rem !important; + } + + .pr-xl-4, + .px-xl-4 { + padding-right: 1.5rem !important; + } + + .pb-xl-4, + .py-xl-4 { + padding-bottom: 1.5rem !important; + } + + .pl-xl-4, + .px-xl-4 { + padding-left: 1.5rem !important; + } + + .p-xl-5 { + padding: 3rem !important; + } + + .pt-xl-5, + .py-xl-5 { + padding-top: 3rem !important; + } + + .pr-xl-5, + .px-xl-5 { + padding-right: 3rem !important; + } + + .pb-xl-5, + .py-xl-5 { + padding-bottom: 3rem !important; + } + + .pl-xl-5, + .px-xl-5 { + padding-left: 3rem !important; + } + + .m-xl-auto { + margin: auto !important; + } + + .mt-xl-auto, + .my-xl-auto { + margin-top: auto !important; + } + + .mr-xl-auto, + .mx-xl-auto { + margin-right: auto !important; + } + + .mb-xl-auto, + .my-xl-auto { + margin-bottom: auto !important; + } + + .ml-xl-auto, + .mx-xl-auto { + margin-left: auto !important; + } +} +.text-justify { + text-align: justify !important; +} + +.text-nowrap { + white-space: nowrap !important; +} + +.text-truncate { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.text-left { + text-align: left !important; +} + +.text-right { + text-align: right !important; +} + +.text-center { + text-align: center !important; +} + +@media (min-width: 576px) { + .text-sm-left { + text-align: left !important; + } + + .text-sm-right { + text-align: right !important; + } + + .text-sm-center { + text-align: center !important; + } +} +@media (min-width: 768px) { + .text-md-left { + text-align: left !important; + } + + .text-md-right { + text-align: right !important; + } + + .text-md-center { + text-align: center !important; + } +} +@media (min-width: 992px) { + .text-lg-left { + text-align: left !important; + } + + .text-lg-right { + text-align: right !important; + } + + .text-lg-center { + text-align: center !important; + } +} +@media (min-width: 1200px) { + .text-xl-left { + text-align: left !important; + } + + .text-xl-right { + text-align: right !important; + } + + .text-xl-center { + text-align: center !important; + } +} +.text-lowercase { + text-transform: lowercase !important; +} + +.text-uppercase { + text-transform: uppercase !important; +} + +.text-capitalize { + text-transform: capitalize !important; +} + +.font-weight-light { + font-weight: 300 !important; +} + +.font-weight-normal { + font-weight: 400 
!important; +} + +.font-weight-bold { + font-weight: 700 !important; +} + +.font-italic { + font-style: italic !important; +} + +.text-white { + color: #fff !important; +} + +.text-primary { + color: #007bff !important; +} + +a.text-primary:hover, a.text-primary:focus { + color: #0062cc !important; +} + +.text-secondary { + color: #6c757d !important; +} + +a.text-secondary:hover, a.text-secondary:focus { + color: #545b62 !important; +} + +.text-success { + color: #28a745 !important; +} + +a.text-success:hover, a.text-success:focus { + color: #1e7e34 !important; +} + +.text-info { + color: #17a2b8 !important; +} + +a.text-info:hover, a.text-info:focus { + color: #117a8b !important; +} + +.text-warning { + color: #ffc107 !important; +} + +a.text-warning:hover, a.text-warning:focus { + color: #d39e00 !important; +} + +.text-danger { + color: #dc3545 !important; +} + +a.text-danger:hover, a.text-danger:focus { + color: #bd2130 !important; +} + +.text-light { + color: #f8f9fa !important; +} + +a.text-light:hover, a.text-light:focus { + color: #dae0e5 !important; +} + +.text-dark { + color: #343a40 !important; +} + +a.text-dark:hover, a.text-dark:focus { + color: #1d2124 !important; +} + +.text-muted { + color: #6c757d !important; +} + +.text-hide { + font: 0/0 a; + color: transparent; + text-shadow: none; + background-color: transparent; + border: 0; +} + +.visible { + visibility: visible !important; +} + +.invisible { + visibility: hidden !important; +} + +@media print { + *, + *::before, + *::after { + text-shadow: none !important; + -webkit-box-shadow: none !important; + box-shadow: none !important; + } + + a:not(.btn) { + text-decoration: underline; + } + + abbr[title]::after { + content: " (" attr(title) ")"; + } + + pre { + white-space: pre-wrap !important; + } + + pre, + blockquote { + border: 1px solid #999; + page-break-inside: avoid; + } + + thead { + display: table-header-group; + } + + tr, + img { + page-break-inside: avoid; + } + + p, + h2, + h3 { + orphans: 3; + widows: 3; + } + + h2, + h3 { + page-break-after: avoid; + } + + @page { + size: a3; + } + body { + min-width: 992px !important; + } + + .container { + min-width: 992px !important; + } + + .navbar { + display: none; + } + + .badge { + border: 1px solid #000; + } + + .table { + border-collapse: collapse !important; + } + .table td, + .table th { + background-color: #fff !important; + } + + .table-bordered th, + .table-bordered td { + border: 1px solid #ddd !important; + } +} +/*Github syntax highlighting theme via Rouge*/ +.highlight table td { + padding: 5px; +} + +.highlight table pre { + margin: 0; +} + +.highlight .cm { + color: #999988; + font-style: italic; +} + +.highlight .cp { + color: #999999; + font-weight: bold; +} + +.highlight .c1 { + color: #999988; + font-style: italic; +} + +.highlight .cs { + color: #999999; + font-weight: bold; + font-style: italic; +} + +.highlight .c, .highlight .cd { + color: #999988; + font-style: italic; +} + +.highlight .err { + color: #a61717; + background-color: #e3d2d2; +} + +.highlight .gd { + color: #000000; + background-color: #ffdddd; +} + +.highlight .ge { + color: #000000; + font-style: italic; +} + +.highlight .gr { + color: #aa0000; +} + +.highlight .gh { + color: #999999; +} + +.highlight .gi { + color: #000000; + background-color: #ddffdd; +} + +.highlight .go { + color: #888888; +} + +.highlight .gp { + color: #555555; +} + +.highlight .gs { + font-weight: bold; +} + +.highlight .gu { + color: #aaaaaa; +} + +.highlight .gt { + color: #aa0000; +} + +.highlight .kc { + 
color: #000000; + font-weight: bold; +} + +.highlight .kd { + color: #000000; + font-weight: bold; +} + +.highlight .kn { + color: #000000; + font-weight: bold; +} + +.highlight .kp { + color: #000000; + font-weight: bold; +} + +.highlight .kr { + color: #000000; + font-weight: bold; +} + +.highlight .kt { + color: #445588; + font-weight: bold; +} + +.highlight .k, .highlight .kv { + color: #000000; + font-weight: bold; +} + +.highlight .mf { + color: #009999; +} + +.highlight .mh { + color: #009999; +} + +.highlight .il { + color: #009999; +} + +.highlight .mi { + color: #009999; +} + +.highlight .mo { + color: #009999; +} + +.highlight .m, .highlight .mb, .highlight .mx { + color: #009999; +} + +.highlight .sb { + color: #d14; +} + +.highlight .sc { + color: #d14; +} + +.highlight .sd { + color: #d14; +} + +.highlight .s2 { + color: #d14; +} + +.highlight .se { + color: #d14; +} + +.highlight .sh { + color: #d14; +} + +.highlight .si { + color: #d14; +} + +.highlight .sx { + color: #d14; +} + +.highlight .sr { + color: #009926; +} + +.highlight .s1 { + color: #d14; +} + +.highlight .ss { + color: #990073; +} + +.highlight .s { + color: #d14; +} + +.highlight .na { + color: #008080; +} + +.highlight .bp { + color: #525252; +} + +.highlight .nb { + color: #0086B3; +} + +.highlight .nc { + color: #445588; + font-weight: bold; +} + +.highlight .no { + color: #008080; +} + +.highlight .nd { + color: #3c5d5d; + font-weight: bold; +} + +.highlight .ni { + color: #800080; +} + +.highlight .ne { + color: #990000; + font-weight: bold; +} + +.highlight .nf { + color: #990000; + font-weight: bold; +} + +.highlight .nl { + color: #990000; + font-weight: bold; +} + +.highlight .nn { + color: #555555; +} + +.highlight .nt { + color: #000080; +} + +.highlight .vc { + color: #008080; +} + +.highlight .vg { + color: #008080; +} + +.highlight .vi { + color: #008080; +} + +.highlight .nv { + color: #008080; +} + +.highlight .ow { + color: #000000; + font-weight: bold; +} + +.highlight .o { + color: #000000; + font-weight: bold; +} + +.highlight .n { + color: #000000; + font-weight: bold; +} + +.highlight .p { + color: #000000; + font-weight: bold; +} + +.highlight .w { + color: #bbbbbb; +} + +.highlight { + background-color: #f8f8f8; +} + +@font-face { + font-family: FreightSans; + font-weight: 700; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-bold.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-bold.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 700; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-bold-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-bold-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 500; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-medium.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-medium.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 500; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-medium-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-medium-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 100; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-light.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-light.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 100; + font-style: italic; + src: 
url("../fonts/FreightSans/freight-sans-light-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-light-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 400; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-book-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-book-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 400; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-book.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-book.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 600; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-SemiBold"), url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 500; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Medium"), url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 400; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Regular"), url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 300; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Light"), url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff") format("woff"); +} +html { + position: relative; + min-height: 100%; + font-size: 12px; +} +@media screen and (min-width: 768px) { + html { + font-size: 16px; + } +} + +* { + -webkit-box-sizing: border-box; + box-sizing: border-box; +} + +body { + font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif; +} + +a:link, +a:visited, +a:hover { + text-decoration: none; + color: #e44c2c; +} + +a.with-right-arrow, .btn.with-right-arrow { + padding-right: 1.375rem; + position: relative; + background-image: url("../images/chevron-right-orange.svg"); + background-size: 6px 13px; + background-position: center right 5px; + background-repeat: no-repeat; +} +@media screen and (min-width: 768px) { + a.with-right-arrow, .btn.with-right-arrow { + background-size: 8px 14px; + background-position: center right 12px; + padding-right: 2rem; + } +} + +::-webkit-input-placeholder { + color: #e44c2c; +} + +::-moz-placeholder { + color: #e44c2c; +} + +:-ms-input-placeholder { + color: #e44c2c; +} + +:-moz-placeholder { + color: #e44c2c; +} + +.email-subscribe-form input.email { + color: #e44c2c; + border: none; + border-bottom: 1px solid #939393; + width: 100%; + background-color: transparent; + outline: none; + font-size: 1.125rem; + letter-spacing: 0.25px; + line-height: 2.25rem; +} +.email-subscribe-form input[type="submit"] { + position: absolute; + right: 0; + top: 10px; + height: 15px; + width: 15px; + background-image: url("../images/arrow-right-with-tail.svg"); + background-color: transparent; + background-repeat: no-repeat; + background-size: 15px 15px; + background-position: center center; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + border: 0; +} + +.email-subscribe-form-fields-wrapper { + position: relative; +} + 
+.anchorjs-link { + color: #6c6c6d !important; +} +@media screen and (min-width: 768px) { + .anchorjs-link:hover { + color: inherit; + text-decoration: none !important; + } +} + +.pytorch-article #table-of-contents { + display: none; +} + +code, kbd, pre, samp { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; +} +code span, kbd span, pre span, samp span { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; +} + +pre { + padding: 1.125rem; + background-color: #f3f4f7; +} +pre code { + font-size: 0.875rem; +} +pre.highlight { + background-color: #f3f4f7; + line-height: 1.3125rem; +} + +code.highlighter-rouge { + color: #6c6c6d; + background-color: #f3f4f7; + padding: 2px 6px; +} + +a:link code.highlighter-rouge, +a:visited code.highlighter-rouge, +a:hover code.highlighter-rouge { + color: #4974D1; +} +a:link.has-code, +a:visited.has-code, +a:hover.has-code { + color: #4974D1; +} + +p code, +h1 code, +h2 code, +h3 code, +h4 code, +h5 code, +h6 code { + font-size: 78.5%; +} + +pre { + white-space: pre-wrap; + white-space: -moz-pre-wrap; + white-space: -pre-wrap; + white-space: -o-pre-wrap; + word-wrap: break-word; +} + +.header-holder { + height: 68px; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + left: 0; + margin-left: auto; + margin-right: auto; + position: fixed; + right: 0; + top: 0; + width: 100%; + z-index: 9999; + background-color: #ffffff; + border-bottom: 1px solid #e2e2e2; +} +@media screen and (min-width: 1100px) { + .header-holder { + height: 90px; + } +} + +.header-container { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.header-container:before, .header-container:after { + content: ""; + display: table; +} +.header-container:after { + clear: both; +} +.header-container { + *zoom: 1; +} +@media screen and (min-width: 1100px) { + .header-container { + display: block; + } +} + +.header-logo { + height: 23px; + width: 93px; + background-image: url("../images/logo.svg"); + background-repeat: no-repeat; + background-size: 93px 23px; + display: block; + float: left; + z-index: 10; +} +@media screen and (min-width: 1100px) { + .header-logo { + background-size: 108px 27px; + position: absolute; + height: 27px; + width: 108px; + top: 4px; + float: none; + } +} + +.main-menu-open-button { + background-image: url("../images/icon-menu-dots.svg"); + background-position: center center; + background-size: 25px 7px; + background-repeat: no-repeat; + width: 25px; + height: 17px; + position: absolute; + right: 0; + top: 4px; +} +@media screen and (min-width: 1100px) { + .main-menu-open-button { + display: none; + } +} + +.header-holder .main-menu { + display: none; +} +@media screen and (min-width: 1100px) { + .header-holder .main-menu { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + } +} +.header-holder .main-menu ul { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + margin: 0; +} +.header-holder .main-menu ul li { + display: inline-block; + margin-right: 40px; + position: relative; +} +.header-holder 
.main-menu ul li.active:after { + content: "•"; + bottom: -24px; + color: #e44c2c; + font-size: 1.375rem; + left: 0; + position: absolute; + right: 0; + text-align: center; +} +.header-holder .main-menu ul li.active a { + color: #e44c2c; +} +.header-holder .main-menu ul li.docs-active:after { + content: "•"; + bottom: -24px; + color: #e44c2c; + font-size: 1.375rem; + left: -24px; + position: absolute; + right: 0; + text-align: center; +} +.header-holder .main-menu ul li:last-of-type { + margin-right: 0; +} +.header-holder .main-menu ul li a { + color: #ffffff; + font-size: 1.3rem; + letter-spacing: 0; + line-height: 2.125rem; + text-align: center; + text-decoration: none; +} +@media screen and (min-width: 1100px) { + .header-holder .main-menu ul li a:hover { + color: #e44c2c; + } +} + +.mobile-main-menu { + display: none; +} +.mobile-main-menu.open { + background-color: #262626; + display: block; + height: 100%; + left: 0; + margin-left: auto; + margin-right: auto; + min-height: 100%; + position: fixed; + right: 0; + top: 0; + width: 100%; + z-index: 99999; +} + +.mobile-main-menu .container-fluid { + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 68px; + position: relative; +} +.mobile-main-menu .container-fluid:before, .mobile-main-menu .container-fluid:after { + content: ""; + display: table; +} +.mobile-main-menu .container-fluid:after { + clear: both; +} +.mobile-main-menu .container-fluid { + *zoom: 1; +} + +.mobile-main-menu.open ul { + list-style-type: none; + padding: 0; +} +.mobile-main-menu.open ul li a, .mobile-main-menu.open .resources-mobile-menu-title { + font-size: 2rem; + color: #ffffff; + letter-spacing: 0; + line-height: 4rem; + text-decoration: none; +} +.mobile-main-menu.open ul li.active a { + color: #e44c2c; +} + +.main-menu-close-button { + background-image: url("../images/icon-close.svg"); + background-position: center center; + background-repeat: no-repeat; + background-size: 24px 24px; + height: 24px; + position: absolute; + right: 0; + width: 24px; + top: -4px; +} + +.mobile-main-menu-header-container { + position: relative; +} + +.mobile-main-menu-links-container { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding-left: 2.8125rem; + height: 90vh; + margin-top: -25px; + padding-top: 50%; + overflow-y: scroll; +} +.mobile-main-menu-links-container .main-menu { + height: 100vh; +} + +.mobile-main-menu-links-container ul.resources-mobile-menu-items li { + padding-left: 15px; +} + +.site-footer { + padding: 2.5rem 0; + width: 100%; + background: #000000; + background-size: 100%; + margin-left: 0; + margin-right: 0; + position: relative; + z-index: 201; +} +@media screen and (min-width: 768px) { + .site-footer { + padding: 5rem 0; + } +} +.site-footer p { + color: #ffffff; +} +.site-footer ul { + list-style-type: none; + padding-left: 0; + margin-bottom: 0; +} +.site-footer ul li { + font-size: 1.125rem; + line-height: 2rem; + color: #A0A0A1; + padding-bottom: 0.375rem; +} +.site-footer ul li.list-title { + padding-bottom: 0.75rem; + color: #ffffff; +} +.site-footer a:link, +.site-footer a:visited { + color: inherit; +} +@media screen and (min-width: 768px) { + .site-footer a:hover { + color: #e44c2c; + } +} + +.docs-tutorials-resources { + background-color: #262626; + color: #ffffff; + padding-top: 2.5rem; + padding-bottom: 2.5rem; + position: relative; + 
z-index: 201; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources { + padding-top: 5rem; + padding-bottom: 5rem; + } +} +.docs-tutorials-resources p { + color: #929292; + font-size: 1.125rem; +} +.docs-tutorials-resources h2 { + font-size: 1.5rem; + letter-spacing: -0.25px; + text-transform: none; + margin-bottom: 0.25rem; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources h2 { + margin-bottom: 1.25rem; + } +} +.docs-tutorials-resources .col-md-4 { + margin-bottom: 2rem; + text-align: center; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources .col-md-4 { + margin-bottom: 0; + } +} +.docs-tutorials-resources .with-right-arrow { + margin-left: 12px; +} +.docs-tutorials-resources .with-right-arrow:hover { + background-image: url("../images/chevron-right-white.svg"); +} +.docs-tutorials-resources p { + font-size: 1rem; + line-height: 1.5rem; + letter-spacing: 0.22px; + color: #939393; + margin-bottom: 0; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources p { + margin-bottom: 1.25rem; + } +} +.docs-tutorials-resources a { + font-size: 1.125rem; + color: #e44c2c; +} +.docs-tutorials-resources a:hover { + color: #ffffff; +} + +.footer-container { + position: relative; +} + +@media screen and (min-width: 768px) { + .footer-logo-wrapper { + position: absolute; + top: 0; + left: 30px; + } +} + +.footer-logo { + background-image: url("../images/logo-icon.svg"); + background-position: center; + background-repeat: no-repeat; + background-size: 20px 24px; + display: block; + height: 24px; + margin-bottom: 2.8125rem; + width: 20px; +} +@media screen and (min-width: 768px) { + .footer-logo { + background-size: 29px 36px; + height: 36px; + margin-bottom: 0; + margin-bottom: 0; + width: 29px; + } +} + +.footer-links-wrapper { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; +} +@media screen and (min-width: 768px) { + .footer-links-wrapper { + -ms-flex-wrap: initial; + flex-wrap: initial; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + } +} + +.footer-links-col { + margin-bottom: 3.75rem; + width: 50%; +} +@media screen and (min-width: 768px) { + .footer-links-col { + margin-bottom: 0; + width: 14%; + margin-right: 23px; + } + .footer-links-col.follow-us-col { + width: 18%; + margin-right: 0; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .footer-links-col { + width: 18%; + margin-right: 30px; + } +} + +.footer-social-icons { + margin: 8.5625rem 0 2.5rem 0; +} +.footer-social-icons a { + height: 32px; + width: 32px; + display: inline-block; + background-color: #CCCDD1; + border-radius: 50%; + margin-right: 5px; +} +.footer-social-icons a.facebook { + background-image: url("../images/logo-facebook-dark.svg"); + background-position: center center; + background-size: 9px 18px; + background-repeat: no-repeat; +} +.footer-social-icons a.twitter { + background-image: url("../images/logo-twitter-dark.svg"); + background-position: center center; + background-size: 17px 17px; + background-repeat: no-repeat; +} +.footer-social-icons a.youtube { + background-image: url("../images/logo-youtube-dark.svg"); + background-position: center center; + background-repeat: no-repeat; +} + +.site-footer .mc-field-group { + margin-top: -2px; +} + +article.pytorch-article { + max-width: 920px; + margin: 0 auto; +} +article.pytorch-article h2, +article.pytorch-article h3, +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article 
h6 { + margin: 1.375rem 0; + color: #262626; +} +article.pytorch-article h2 { + font-size: 1.625rem; + letter-spacing: 1.33px; + line-height: 2rem; + text-transform: none; +} +article.pytorch-article h3 { + font-size: 1.5rem; + letter-spacing: -0.25px; + line-height: 1.875rem; + text-transform: none; +} +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + font-size: 1.125rem; + letter-spacing: -0.19px; + line-height: 1.875rem; +} +article.pytorch-article p { + margin-bottom: 1.125rem; +} +article.pytorch-article p, +article.pytorch-article ul li, +article.pytorch-article ol li, +article.pytorch-article dl dt, +article.pytorch-article dl dd, +article.pytorch-article blockquote { + font-size: 1rem; + line-height: 1.375rem; + color: #262626; + letter-spacing: 0.01px; + font-weight: 500; +} +article.pytorch-article table { + margin-bottom: 2.5rem; + width: 100%; +} +article.pytorch-article table thead { + border-bottom: 1px solid #cacaca; +} +article.pytorch-article table th { + padding: 0.625rem; + color: #262626; +} +article.pytorch-article table td { + padding: 0.3125rem; +} +article.pytorch-article table tr th:first-of-type, +article.pytorch-article table tr td:first-of-type { + padding-left: 0; +} +article.pytorch-article table.docutils.field-list th.field-name { + padding: 0.3125rem; + padding-left: 0; +} +article.pytorch-article table.docutils.field-list td.field-body { + padding: 0.3125rem; +} +article.pytorch-article table.docutils.field-list td.field-body p:last-of-type { + margin-bottom: 0; +} +article.pytorch-article ul, +article.pytorch-article ol { + margin: 1.5rem 0 3.125rem 0; +} +@media screen and (min-width: 768px) { + article.pytorch-article ul, + article.pytorch-article ol { + padding-left: 6.25rem; + } +} +article.pytorch-article ul li, +article.pytorch-article ol li { + margin-bottom: 0.625rem; +} +article.pytorch-article dl { + margin-bottom: 1.5rem; +} +article.pytorch-article dl dt { + margin-bottom: 0.75rem; +} +article.pytorch-article pre { + margin-bottom: 2.5rem; +} +article.pytorch-article hr { + margin-top: 4.6875rem; + margin-bottom: 4.6875rem; +} +article.pytorch-article blockquote { + margin: 0 auto; + margin-bottom: 2.5rem; + width: 65%; +} +article.pytorch-article img { + width: 100%; +} + +html { + height: 100%; +} +@media screen and (min-width: 768px) { + html { + font-size: 16px; + } +} + +body { + background: #ffffff; + height: 100%; + margin: 0; +} +body.no-scroll { + height: 100%; + overflow: hidden; +} + +p { + margin-top: 0; + margin-bottom: 1.125rem; +} +p a:link, +p a:visited, +p a:hover { + color: #e44c2c; + text-decoration: none; +} +@media screen and (min-width: 768px) { + p a:hover { + text-decoration: underline; + } +} +p a:link, +p a:visited, +p a:hover { + color: #ee4c2c; +} + +.wy-breadcrumbs li a { + color: #ee4c2c; +} + +ul.pytorch-breadcrumbs { + padding-left: 0; + list-style-type: none; +} +ul.pytorch-breadcrumbs li { + display: inline-block; + font-size: 0.875rem; +} +ul.pytorch-breadcrumbs a { + color: #ee4c2c; + text-decoration: none; +} + +.table-of-contents-link-wrapper { + display: block; + margin-top: 0; + padding: 1.25rem 1.875rem; + background-color: #f3f4f7; + position: relative; + color: #262626; + font-size: 1.25rem; +} +.table-of-contents-link-wrapper.is-open .toggle-table-of-contents { + -webkit-transform: rotate(180deg); + transform: rotate(180deg); +} +@media screen and (min-width: 1100px) { + .table-of-contents-link-wrapper { + display: none; + } +} + +.toggle-table-of-contents { + 
background-image: url("../images/chevron-down-grey.svg"); + background-position: center center; + background-repeat: no-repeat; + background-size: 18px 18px; + height: 100%; + position: absolute; + right: 21px; + width: 30px; + top: 0; +} + +.tutorials-header .header-logo { + background-image: url("../images/logo-dark.svg"); +} +.tutorials-header .main-menu ul li a { + color: #262626; +} +.tutorials-header .main-menu-open-button { + background-image: url("../images/icon-menu-dots-dark.svg"); +} + +.rst-content footer .rating-hr.hr-top { + margin-bottom: -0.0625rem; +} +.rst-content footer .rating-hr.hr-bottom { + margin-top: -0.0625rem; +} +.rst-content footer .rating-container { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + font-size: 1.125rem; +} +.rst-content footer .rating-container .rating-prompt, .rst-content footer .rating-container .was-helpful-thank-you { + padding: 0.625rem 1.25rem 0.625rem 1.25rem; +} +.rst-content footer .rating-container .was-helpful-thank-you { + display: none; +} +.rst-content footer .rating-container .rating-prompt.yes-link, .rst-content footer .rating-container .rating-prompt.no-link { + color: #e44c2c; + cursor: pointer; +} +.rst-content footer .rating-container .rating-prompt.yes-link:hover, .rst-content footer .rating-container .rating-prompt.no-link:hover { + background-color: #e44c2c; + color: #ffffff; +} +.rst-content footer .rating-container .stars-outer { + display: inline-block; + position: relative; + font-family: FontAwesome; + padding: 0.625rem 1.25rem 0.625rem 1.25rem; +} +.rst-content footer .rating-container .stars-outer i { + cursor: pointer; +} +.rst-content footer .rating-container .stars-outer .star-fill { + color: #ee4c2c; +} +.rst-content footer div[role="contentinfo"] { + padding-top: 2.5rem; +} +.rst-content footer div[role="contentinfo"] p { + margin-bottom: 0; +} + +h1 { + font-size: 2rem; + letter-spacing: 1.78px; + line-height: 2.5rem; + text-transform: uppercase; + margin: 1.375rem 0; +} + +span.pre { + color: #6c6c6d; + background-color: #f3f4f7; + padding: 2px 6px; +} + +pre { + background-color: #f3f4f7; + padding: 1.375rem; +} + +.highlight .c1 { + color: #6c6c6d; +} + +.headerlink { + display: none !important; +} + +a:link.has-code, +a:hover.has-code, +a:visited.has-code { + color: #4974D1; +} +a:link.has-code span, +a:hover.has-code span, +a:visited.has-code span { + color: #4974D1; +} + +article.pytorch-article ul, +article.pytorch-article ol { + padding-left: 1.875rem; + margin: 0; +} +article.pytorch-article ul li, +article.pytorch-article ol li { + margin: 0; + line-height: 1.75rem; +} +article.pytorch-article ul p, +article.pytorch-article ol p { + line-height: 1.75rem; + margin-bottom: 0; +} +article.pytorch-article ul ul, +article.pytorch-article ul ol, +article.pytorch-article ol ul, +article.pytorch-article ol ol { + margin: 0; +} +article.pytorch-article h1, +article.pytorch-article h2, +article.pytorch-article h3, +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + font-weight: normal; +} +article.pytorch-article h1 a, +article.pytorch-article h2 a, +article.pytorch-article h3 a, +article.pytorch-article h4 a, +article.pytorch-article h5 a, +article.pytorch-article h6 a { + color: #262626; +} +article.pytorch-article p.caption { + margin-top: 1.25rem; +} + +article.pytorch-article .section:first-of-type h1:first-of-type { + margin-top: 0; +} + +article.pytorch-article .sphx-glr-thumbcontainer { + margin: 0; + border: 1px solid 
#d6d7d8; + border-radius: 0; + width: 45%; + text-align: center; + margin-bottom: 5%; +} +@media screen and (max-width: 1100px) { + article.pytorch-article .sphx-glr-thumbcontainer:nth-child(odd) { + margin-left: 0; + margin-right: 2.5%; + } + article.pytorch-article .sphx-glr-thumbcontainer:nth-child(even) { + margin-right: 0; + margin-left: 2.5%; + } + article.pytorch-article .sphx-glr-thumbcontainer .figure { + width: 40%; + } +} +@media screen and (min-width: 1101px) { + article.pytorch-article .sphx-glr-thumbcontainer { + margin-right: 3%; + margin-bottom: 3%; + width: 30%; + } +} +article.pytorch-article .sphx-glr-thumbcontainer .caption-text a { + font-size: 1rem; + color: #262626; + letter-spacing: 0; + line-height: 1.5rem; + text-decoration: none; +} +article.pytorch-article .sphx-glr-thumbcontainer:hover { + -webkit-box-shadow: none; + box-shadow: none; + border-bottom-color: #ffffff; +} +article.pytorch-article .sphx-glr-thumbcontainer:hover .figure:before { + bottom: 100%; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure { + width: 80%; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure:before { + content: ""; + display: block; + position: absolute; + top: 0; + bottom: 35%; + left: 0; + right: 0; + background: #8A94B3; + opacity: 0.10; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure a.reference.internal { + text-align: left; +} +@media screen and (min-width: 768px) { + article.pytorch-article .sphx-glr-thumbcontainer:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + article.pytorch-article .sphx-glr-thumbcontainer:hover:after { + width: 100%; + } +} +@media screen and (min-width: 768px) { + article.pytorch-article .sphx-glr-thumbcontainer:after { + background-color: #ee4c2c; + } +} + +article.pytorch-article .section :not(dt) > code { + color: #262626; + border-top: solid 2px #ffffff; + background-color: #ffffff; + border-bottom: solid 2px #ffffff; + padding: 0px 3px; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; +} +article.pytorch-article .section :not(dt) > code .pre { + outline: 0px; + padding: 0px; +} +article.pytorch-article .function dt, article.pytorch-article .attribute dt, article.pytorch-article .class .attribute dt, article.pytorch-article .class dt { + position: relative; + background: #f3f4f7; + padding: 0.5rem; + border-left: 3px solid #ee4c2c; + word-wrap: break-word; + padding-right: 100px; +} +article.pytorch-article .function dt em.property, article.pytorch-article .attribute dt em.property, article.pytorch-article .class dt em.property { + font-family: inherit; +} +article.pytorch-article .function dt em, article.pytorch-article .attribute dt em, article.pytorch-article .class .attribute dt em, article.pytorch-article .class dt em, article.pytorch-article .function dt .sig-paren, article.pytorch-article .attribute dt .sig-paren, article.pytorch-article .class dt .sig-paren { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; + font-size: 87.5%; +} +article.pytorch-article .function dt a, article.pytorch-article .attribute dt a, article.pytorch-article .class .attribute dt a, article.pytorch-article .class dt a { + position: absolute; + right: 30px; + padding-right: 0; + top: 50%; + -webkit-transform: perspective(1px) translateY(-50%); + transform: perspective(1px) 
translateY(-50%); +} +article.pytorch-article .function dt:hover .viewcode-link, article.pytorch-article .attribute dt:hover .viewcode-link, article.pytorch-article .class dt:hover .viewcode-link { + color: #ee4c2c; +} +article.pytorch-article .function .anchorjs-link, article.pytorch-article .attribute .anchorjs-link, article.pytorch-article .class .anchorjs-link { + display: inline; + position: absolute; + right: 8px; + font-size: 1.5625rem !important; + padding-left: 0; +} +article.pytorch-article .function dt > code, article.pytorch-article .attribute dt > code, article.pytorch-article .class .attribute dt > code, article.pytorch-article .class dt > code { + color: #262626; + border-top: solid 2px #f3f4f7; + background-color: #f3f4f7; + border-bottom: solid 2px #f3f4f7; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; +} +article.pytorch-article .function .viewcode-link, article.pytorch-article .attribute .viewcode-link, article.pytorch-article .class .viewcode-link { + font-size: 0.875rem; + color: #979797; + letter-spacing: 0; + line-height: 1.5rem; + text-transform: uppercase; +} +article.pytorch-article .function dd, article.pytorch-article .attribute dd, article.pytorch-article .class .attribute dd, article.pytorch-article .class dd { + padding-left: 3.75rem; +} +article.pytorch-article .function dd p, article.pytorch-article .attribute dd p, article.pytorch-article .class .attribute dd p, article.pytorch-article .class dd p { + color: #262626; +} +article.pytorch-article .function table tbody tr th.field-name, article.pytorch-article .attribute table tbody tr th.field-name, article.pytorch-article .class table tbody tr th.field-name { + white-space: nowrap; + color: #262626; + width: 20%; +} +@media screen and (min-width: 768px) { + article.pytorch-article .function table tbody tr th.field-name, article.pytorch-article .attribute table tbody tr th.field-name, article.pytorch-article .class table tbody tr th.field-name { + width: 15%; + } +} +article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + padding: 0.625rem; + width: 80%; + color: #262626; +} +@media screen and (min-width: 768px) { + article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + width: 85%; + } +} +@media screen and (min-width: 1600px) { + article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + padding-left: 1.25rem; + } +} +article.pytorch-article .function table tbody tr td.field-body p, article.pytorch-article .attribute table tbody tr td.field-body p, article.pytorch-article .class table tbody tr td.field-body p { + padding-left: 0px; +} +article.pytorch-article .function table tbody tr td.field-body p:last-of-type, article.pytorch-article .attribute table tbody tr td.field-body p:last-of-type, article.pytorch-article .class table tbody tr td.field-body p:last-of-type { + margin-bottom: 0; +} +article.pytorch-article .function table tbody tr td.field-body ol, article.pytorch-article .attribute table tbody tr td.field-body ol, article.pytorch-article .class table tbody tr td.field-body ol, article.pytorch-article .function table tbody tr td.field-body ul, article.pytorch-article 
.attribute table tbody tr td.field-body ul, article.pytorch-article .class table tbody tr td.field-body ul { + padding-left: 1rem; + padding-bottom: 0; +} +article.pytorch-article .function table.docutils.field-list, article.pytorch-article .attribute table.docutils.field-list, article.pytorch-article .class table.docutils.field-list { + margin-bottom: 0.75rem; +} +article.pytorch-article .attribute .has-code { + float: none; +} +article.pytorch-article .class dt { + border-left: none; + border-top: 3px solid #ee4c2c; + padding-left: 4em; +} +article.pytorch-article .class dt em.property { + position: absolute; + left: 0.5rem; +} +article.pytorch-article .class dd .docutils dt { + padding-left: 0.5rem; +} +article.pytorch-article .class em.property { + text-transform: uppercase; + font-style: normal; + color: #ee4c2c; + font-size: 1rem; + letter-spacing: 0; + padding-right: 0.75rem; +} +article.pytorch-article .class dl dt em.property { + position: static; + left: 0; + padding-right: 0; +} +article.pytorch-article .class .method dt, +article.pytorch-article .class .staticmethod dt { + border-left: 3px solid #ee4c2c; + border-top: none; +} +article.pytorch-article .class .method dt, +article.pytorch-article .class .staticmethod dt { + padding-left: 0.5rem; +} +article.pytorch-article .class .attribute dt { + border-top: none; +} +article.pytorch-article .class .attribute dt em.property { + position: relative; + left: 0; +} +article.pytorch-article table { + table-layout: fixed; +} + +article.pytorch-article .note, +article.pytorch-article .warning, +article.pytorch-article .tip, +article.pytorch-article .seealso, +article.pytorch-article .hint, +article.pytorch-article .important, +article.pytorch-article .caution, +article.pytorch-article .danger, +article.pytorch-article .attention, +article.pytorch-article .error { + background: #f3f4f7; + margin-top: 1.875rem; + margin-bottom: 1.125rem; +} +article.pytorch-article .note .admonition-title, +article.pytorch-article .warning .admonition-title, +article.pytorch-article .tip .admonition-title, +article.pytorch-article .seealso .admonition-title, +article.pytorch-article .hint .admonition-title, +article.pytorch-article .important .admonition-title, +article.pytorch-article .caution .admonition-title, +article.pytorch-article .danger .admonition-title, +article.pytorch-article .attention .admonition-title, +article.pytorch-article .error .admonition-title { + color: #ffffff; + letter-spacing: 1px; + text-transform: uppercase; + margin-bottom: 1.125rem; + padding: 3px 0 3px 1.375rem; + position: relative; + font-size: 0.875rem; +} +article.pytorch-article .note .admonition-title:before, +article.pytorch-article .warning .admonition-title:before, +article.pytorch-article .tip .admonition-title:before, +article.pytorch-article .seealso .admonition-title:before, +article.pytorch-article .hint .admonition-title:before, +article.pytorch-article .important .admonition-title:before, +article.pytorch-article .caution .admonition-title:before, +article.pytorch-article .danger .admonition-title:before, +article.pytorch-article .attention .admonition-title:before, +article.pytorch-article .error .admonition-title:before { + content: "\2022"; + position: absolute; + left: 9px; + color: #ffffff; + top: 2px; +} +article.pytorch-article .note p:nth-child(n + 2), +article.pytorch-article .warning p:nth-child(n + 2), +article.pytorch-article .tip p:nth-child(n + 2), +article.pytorch-article .seealso p:nth-child(n + 2), +article.pytorch-article .hint 
p:nth-child(n + 2), +article.pytorch-article .important p:nth-child(n + 2), +article.pytorch-article .caution p:nth-child(n + 2), +article.pytorch-article .danger p:nth-child(n + 2), +article.pytorch-article .attention p:nth-child(n + 2), +article.pytorch-article .error p:nth-child(n + 2) { + padding: 0 1.375rem; +} +article.pytorch-article .note table, +article.pytorch-article .warning table, +article.pytorch-article .tip table, +article.pytorch-article .seealso table, +article.pytorch-article .hint table, +article.pytorch-article .important table, +article.pytorch-article .caution table, +article.pytorch-article .danger table, +article.pytorch-article .attention table, +article.pytorch-article .error table { + margin: 0 2rem; + width: auto; +} +article.pytorch-article .note .pre, +article.pytorch-article .note pre, +article.pytorch-article .warning .pre, +article.pytorch-article .warning pre, +article.pytorch-article .tip .pre, +article.pytorch-article .tip pre, +article.pytorch-article .seealso .pre, +article.pytorch-article .seealso pre, +article.pytorch-article .hint .pre, +article.pytorch-article .hint pre, +article.pytorch-article .important .pre, +article.pytorch-article .important pre, +article.pytorch-article .caution .pre, +article.pytorch-article .caution pre, +article.pytorch-article .danger .pre, +article.pytorch-article .danger pre, +article.pytorch-article .attention .pre, +article.pytorch-article .attention pre, +article.pytorch-article .error .pre, +article.pytorch-article .error pre { + background: #ffffff; + outline: 1px solid #e9e9e9; +} +article.pytorch-article .note :not(dt) > code, +article.pytorch-article .warning :not(dt) > code, +article.pytorch-article .tip :not(dt) > code, +article.pytorch-article .seealso :not(dt) > code, +article.pytorch-article .hint :not(dt) > code, +article.pytorch-article .important :not(dt) > code, +article.pytorch-article .caution :not(dt) > code, +article.pytorch-article .danger :not(dt) > code, +article.pytorch-article .attention :not(dt) > code, +article.pytorch-article .error :not(dt) > code { + border-top: solid 2px #ffffff; + background-color: #ffffff; + border-bottom: solid 2px #ffffff; + padding: 0px 3px; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; + outline: 1px solid #e9e9e9; +} +article.pytorch-article .note :not(dt) > code .pre, +article.pytorch-article .warning :not(dt) > code .pre, +article.pytorch-article .tip :not(dt) > code .pre, +article.pytorch-article .seealso :not(dt) > code .pre, +article.pytorch-article .hint :not(dt) > code .pre, +article.pytorch-article .important :not(dt) > code .pre, +article.pytorch-article .caution :not(dt) > code .pre, +article.pytorch-article .danger :not(dt) > code .pre, +article.pytorch-article .attention :not(dt) > code .pre, +article.pytorch-article .error :not(dt) > code .pre { + outline: 0px; + padding: 0px; +} +article.pytorch-article .note pre, +article.pytorch-article .warning pre, +article.pytorch-article .tip pre, +article.pytorch-article .seealso pre, +article.pytorch-article .hint pre, +article.pytorch-article .important pre, +article.pytorch-article .caution pre, +article.pytorch-article .danger pre, +article.pytorch-article .attention pre, +article.pytorch-article .error pre { + margin-bottom: 0; +} +article.pytorch-article .note .highlight, +article.pytorch-article .warning .highlight, +article.pytorch-article .tip .highlight, +article.pytorch-article .seealso .highlight, +article.pytorch-article .hint .highlight, +article.pytorch-article 
.important .highlight, +article.pytorch-article .caution .highlight, +article.pytorch-article .danger .highlight, +article.pytorch-article .attention .highlight, +article.pytorch-article .error .highlight { + margin: 0 2rem 1.125rem 2rem; +} +article.pytorch-article .note ul, +article.pytorch-article .note ol, +article.pytorch-article .warning ul, +article.pytorch-article .warning ol, +article.pytorch-article .tip ul, +article.pytorch-article .tip ol, +article.pytorch-article .seealso ul, +article.pytorch-article .seealso ol, +article.pytorch-article .hint ul, +article.pytorch-article .hint ol, +article.pytorch-article .important ul, +article.pytorch-article .important ol, +article.pytorch-article .caution ul, +article.pytorch-article .caution ol, +article.pytorch-article .danger ul, +article.pytorch-article .danger ol, +article.pytorch-article .attention ul, +article.pytorch-article .attention ol, +article.pytorch-article .error ul, +article.pytorch-article .error ol { + padding-left: 3.25rem; +} +article.pytorch-article .note ul li, +article.pytorch-article .note ol li, +article.pytorch-article .warning ul li, +article.pytorch-article .warning ol li, +article.pytorch-article .tip ul li, +article.pytorch-article .tip ol li, +article.pytorch-article .seealso ul li, +article.pytorch-article .seealso ol li, +article.pytorch-article .hint ul li, +article.pytorch-article .hint ol li, +article.pytorch-article .important ul li, +article.pytorch-article .important ol li, +article.pytorch-article .caution ul li, +article.pytorch-article .caution ol li, +article.pytorch-article .danger ul li, +article.pytorch-article .danger ol li, +article.pytorch-article .attention ul li, +article.pytorch-article .attention ol li, +article.pytorch-article .error ul li, +article.pytorch-article .error ol li { + color: #262626; +} +article.pytorch-article .note p, +article.pytorch-article .warning p, +article.pytorch-article .tip p, +article.pytorch-article .seealso p, +article.pytorch-article .hint p, +article.pytorch-article .important p, +article.pytorch-article .caution p, +article.pytorch-article .danger p, +article.pytorch-article .attention p, +article.pytorch-article .error p { + margin-top: 1.125rem; +} +article.pytorch-article .note .admonition-title { + background: #54c7ec; +} +article.pytorch-article .warning .admonition-title { + background: #e94f3b; +} +article.pytorch-article .tip .admonition-title { + background: #6bcebb; +} +article.pytorch-article .seealso .admonition-title { + background: #6bcebb; +} +article.pytorch-article .hint .admonition-title { + background: #a2cdde; +} +article.pytorch-article .important .admonition-title { + background: #5890ff; +} +article.pytorch-article .caution .admonition-title { + background: #f7923a; +} +article.pytorch-article .danger .admonition-title { + background: #db2c49; +} +article.pytorch-article .attention .admonition-title { + background: #f5a623; +} +article.pytorch-article .error .admonition-title { + background: #cc2f90; +} +article.pytorch-article .sphx-glr-download-link-note.admonition.note, +article.pytorch-article .reference.download.internal, article.pytorch-article .sphx-glr-signature { + display: none; +} +article.pytorch-article .admonition > p:last-of-type { + margin-bottom: 0; + padding-bottom: 1.125rem !important; +} + +.pytorch-article div.sphx-glr-download a { + background-color: #f3f4f7; + background-image: url("../images/arrow-down-orange.svg"); + background-repeat: no-repeat; + background-position: left 10px center; + background-size: 
15px 15px; + border-radius: 0; + border: none; + display: block; + text-align: left; + padding: 0.9375rem 3.125rem; + position: relative; + margin: 1.25rem auto; +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + .pytorch-article div.sphx-glr-download a:hover:after { + width: 100%; + } +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a:after { + background-color: #ee4c2c; + } +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a { + background-position: left 20px center; + } +} +.pytorch-article div.sphx-glr-download a:hover { + -webkit-box-shadow: none; + box-shadow: none; + text-decoration: none; + background-image: url("../images/arrow-down-orange.svg"); + background-color: #f3f4f7; +} +.pytorch-article div.sphx-glr-download a span.pre { + background-color: transparent; + font-size: 1.125rem; + padding: 0; + color: #262626; +} +.pytorch-article div.sphx-glr-download a code, .pytorch-article div.sphx-glr-download a kbd, .pytorch-article div.sphx-glr-download a pre, .pytorch-article div.sphx-glr-download a samp, .pytorch-article div.sphx-glr-download a span.pre { + font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif; +} + +.pytorch-article p.sphx-glr-script-out { + margin-bottom: 1.125rem; +} + +.pytorch-article div.sphx-glr-script-out { + margin-bottom: 2.5rem; +} +.pytorch-article div.sphx-glr-script-out .highlight { + margin-left: 0; + margin-top: 0; +} +.pytorch-article div.sphx-glr-script-out .highlight pre { + background-color: #fdede9; + padding: 1.5625rem; + color: #837b79; +} +.pytorch-article div.sphx-glr-script-out + p { + margin-top: unset; +} + +article.pytorch-article .wy-table-responsive table { + border: none; + border-color: #ffffff !important; + table-layout: fixed; +} +article.pytorch-article .wy-table-responsive table thead tr { + border-bottom: 2px solid #6c6c6d; +} +article.pytorch-article .wy-table-responsive table thead th { + line-height: 1.75rem; + padding-left: 0.9375rem; + padding-right: 0.9375rem; +} +article.pytorch-article .wy-table-responsive table tbody .row-odd { + background-color: #f3f4f7; +} +article.pytorch-article .wy-table-responsive table tbody td { + color: #6c6c6d; + white-space: normal; + padding: 0.9375rem; + font-size: 1rem; + line-height: 1.375rem; +} +article.pytorch-article .wy-table-responsive table tbody td .pre { + background: #ffffff; + color: #ee4c2c; + font-size: 87.5%; +} +article.pytorch-article .wy-table-responsive table tbody td code { + font-size: 87.5%; +} + +a[rel~="prev"], a[rel~="next"] { + padding: 0.375rem 0 0 0; +} + +img.next-page, +img.previous-page { + width: 8px; + height: 10px; + position: relative; + top: -1px; +} + +img.previous-page { + -webkit-transform: scaleX(-1); + transform: scaleX(-1); +} + +.rst-footer-buttons { + margin-top: 1.875rem; + margin-bottom: 1.875rem; +} +.rst-footer-buttons .btn:focus, +.rst-footer-buttons .btn.focus { + -webkit-box-shadow: none; + box-shadow: none; +} + +article.pytorch-article blockquote { + margin-left: 3.75rem; + color: #6c6c6d; +} + +article.pytorch-article .caption { + color: #6c6c6d; + letter-spacing: 0.25px; + line-height: 2.125rem; +} + +article.pytorch-article .math { + color: #262626; + width: auto; + text-align: 
center; +} +article.pytorch-article .math img { + width: auto; +} + +.pytorch-breadcrumbs-wrapper { + width: 100%; +} +@media screen and (min-width: 1101px) { + .pytorch-breadcrumbs-wrapper { + float: left; + margin-left: 3%; + width: 75%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-breadcrumbs-wrapper { + width: 850px; + margin-left: 1.875rem; + } +} +.pytorch-breadcrumbs-wrapper .pytorch-breadcrumbs-aside { + float: right; +} +.pytorch-breadcrumbs-wrapper .pytorch-breadcrumbs-aside .fa.fa-github { + margin-top: 5px; + display: block; +} + +.pytorch-article .container { + padding-left: 0; + padding-right: 0; + max-width: none; +} + +a:link, +a:visited, +a:hover { + color: #ee4c2c; +} + +::-webkit-input-placeholder { + color: #ee4c2c; +} + +::-moz-placeholder { + color: #ee4c2c; +} + +:-ms-input-placeholder { + color: #ee4c2c; +} + +:-moz-placeholder { + color: #ee4c2c; +} + +@media screen and (min-width: 768px) { + .site-footer a:hover { + color: #ee4c2c; + } +} + +.docs-tutorials-resources a { + color: #ee4c2c; +} + +.header-holder { + position: relative; + z-index: 201; +} + +.header-holder .main-menu ul li.active:after { + color: #ee4c2c; +} +.header-holder .main-menu ul li.active a { + color: #ee4c2c; +} +@media screen and (min-width: 1100px) { + .header-holder .main-menu ul li a:hover { + color: #ee4c2c; + } +} + +.mobile-main-menu.open ul li.active a { + color: #ee4c2c; +} + +.version { + padding-bottom: 1rem; +} + +.pytorch-call-to-action-links { + padding-top: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links { + padding-top: 2.5rem; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .pytorch-call-to-action-links { + padding-top: 0; + } +} +@media (min-width: 1100px) and (max-width: 1239px) { + .pytorch-call-to-action-links { + padding-top: 2.5rem; + } +} +.pytorch-call-to-action-links #tutorial-type { + display: none; +} +.pytorch-call-to-action-links .call-to-action-img, .pytorch-call-to-action-links .call-to-action-notebook-img { + height: 1.375rem; + width: 1.375rem; + margin-right: 10px; +} +.pytorch-call-to-action-links .call-to-action-notebook-img { + height: 1rem; +} +.pytorch-call-to-action-links a { + padding-right: 1.25rem; + color: #000000; + cursor: pointer; +} +.pytorch-call-to-action-links a:hover { + color: #e44c2c; +} +.pytorch-call-to-action-links a .call-to-action-desktop-view { + display: none; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links a .call-to-action-desktop-view { + display: block; + } +} +.pytorch-call-to-action-links a .call-to-action-mobile-view { + display: block; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links a .call-to-action-mobile-view { + display: none; + } +} +.pytorch-call-to-action-links a #google-colab-link, .pytorch-call-to-action-links a #download-notebook-link, +.pytorch-call-to-action-links a #github-view-link { + padding-bottom: 0.625rem; + border-bottom: 1px solid #f3f4f7; + padding-right: 2.5rem; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.pytorch-call-to-action-links a #google-colab-link:hover, .pytorch-call-to-action-links a #download-notebook-link:hover, +.pytorch-call-to-action-links a #github-view-link:hover { + border-bottom-color: #e44c2c; + color: #e44c2c; +} + +#tutorial-cards-container #tutorial-cards { + width: 100%; +} +#tutorial-cards-container 
.tutorials-nav { + padding-left: 0; + padding-right: 0; + padding-bottom: 0; +} +#tutorial-cards-container .tutorials-hr { + margin-top: 1rem; + margin-bottom: 1rem; +} +#tutorial-cards-container .card.tutorials-card { + border-radius: 0; + border-color: #f3f4f7; + height: 98px; + margin-bottom: 1.25rem; + margin-bottom: 1.875rem; + overflow: scroll; + background-color: #f3f4f7; + cursor: pointer; +} +@media screen and (min-width: 1240px) { + #tutorial-cards-container .card.tutorials-card { + height: 200px; + overflow: inherit; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card { + height: 200px; + overflow: scroll; + } +} +#tutorial-cards-container .card.tutorials-card .tutorials-image { + position: absolute; + top: 0px; + right: 0px; + height: 96px; + width: 96px; + opacity: 0.5; +} +#tutorial-cards-container .card.tutorials-card .tutorials-image img { + height: 100%; + width: 100%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card .tutorials-image { + height: 100%; + width: 25%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card .tutorials-image { + height: 100%; + width: 198px; + } +} +#tutorial-cards-container .card.tutorials-card .tutorials-image:before { + content: ''; + position: absolute; + top: 0; + left: 0; + bottom: 0; + right: 0; + z-index: 1; + background: #000000; + opacity: .075; +} +#tutorial-cards-container .card.tutorials-card .card-title-container { + width: 70%; + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card .card-title-container { + width: 75%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card .card-title-container { + width: 70%; + } +} +#tutorial-cards-container .card.tutorials-card .card-title-container h4 { + margin-bottom: 1.125rem; + margin-top: 0; + font-size: 1.5rem; +} +#tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + font-size: 0.9375rem; + line-height: 1.5rem; + margin-bottom: 0; + color: #6c6c6d; + font-weight: 400; + width: 70%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + width: 75%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + width: 70%; + } +} +#tutorial-cards-container .card.tutorials-card p.tags { + margin-top: 30px; + text-overflow: ellipsis; + white-space: nowrap; + overflow: hidden; +} +#tutorial-cards-container .card.tutorials-card h4 { + color: #262626; + margin-bottom: 1.125rem; +} +#tutorial-cards-container .card.tutorials-card a { + height: 100%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card a { + min-height: 190px; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card a { + min-height: 234px; + } +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + 
transition: width .250s ease-in-out; + } + #tutorial-cards-container .card.tutorials-card:hover:after { + width: 100%; + } +} +#tutorial-cards-container .card.tutorials-card:hover { + background-color: #ffffff; + border: 1px solid #e2e2e2; + border-bottom: none; +} +#tutorial-cards-container .card.tutorials-card:hover p.card-summary { + color: #262626; +} +#tutorial-cards-container .card.tutorials-card:hover .tutorials-image { + opacity: unset; +} +#tutorial-cards-container .tutorial-tags-container { + width: 75%; +} +#tutorial-cards-container .tutorial-tags-container.active { + width: 0; +} +#tutorial-cards-container .tutorial-filter-menu ul { + list-style-type: none; + padding-left: 1.25rem; +} +#tutorial-cards-container .tutorial-filter-menu ul li { + padding-right: 1.25rem; + word-break: break-all; +} +#tutorial-cards-container .tutorial-filter-menu ul li a { + color: #979797; +} +#tutorial-cards-container .tutorial-filter-menu ul li a:hover { + color: #e44c2c; +} +#tutorial-cards-container .tutorial-filter { + cursor: pointer; +} +#tutorial-cards-container .filter-btn { + color: #979797; + border: 1px solid #979797; + display: inline-block; + text-align: center; + white-space: nowrap; + vertical-align: middle; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + margin-bottom: 5px; +} +#tutorial-cards-container .filter-btn:hover { + border: 1px solid #e44c2c; + color: #e44c2c; +} +#tutorial-cards-container .filter-btn.selected { + background-color: #e44c2c; + border: 1px solid #e44c2c; + color: #ffffff; +} +#tutorial-cards-container .all-tag-selected { + background-color: #979797; + color: #ffffff; +} +#tutorial-cards-container .all-tag-selected:hover { + border-color: #979797; + color: #ffffff; +} +#tutorial-cards-container .pagination .page { + border: 1px solid #dee2e6; + padding: 0.5rem 0.75rem; +} +#tutorial-cards-container .pagination .active .page { + background-color: #dee2e6; +} + +article.pytorch-article .tutorials-callout-container { + padding-bottom: 50px; +} +article.pytorch-article .tutorials-callout-container .col-md-6 { + padding-bottom: 10px; +} +article.pytorch-article .tutorials-callout-container .text-container { + padding: 10px 0px 30px 0px; + padding-bottom: 10px; +} +article.pytorch-article .tutorials-callout-container .text-container .body-paragraph { + color: #666666; + font-weight: 300; + font-size: 1.125rem; + line-height: 1.875rem; +} +article.pytorch-article .tutorials-callout-container .btn.callout-button { + font-size: 1.125rem; + border-radius: 0; + border: none; + background-color: #f3f4f7; + color: #6c6c6d; + font-weight: 400; + position: relative; + letter-spacing: 0.25px; +} +@media screen and (min-width: 768px) { + article.pytorch-article .tutorials-callout-container .btn.callout-button:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + article.pytorch-article .tutorials-callout-container .btn.callout-button:hover:after { + width: 100%; + } +} +article.pytorch-article .tutorials-callout-container .btn.callout-button a { + color: inherit; +} + +.pytorch-container { + margin: 0 auto; + padding: 0 1.875rem; + width: auto; + position: relative; +} +@media screen and (min-width: 1100px) { + .pytorch-container { + padding: 0; + } +} +@media screen and (min-width: 1101px) { + .pytorch-container { + margin-left: 25%; + } +} +@media screen and (min-width: 
1600px) { + .pytorch-container { + margin-left: 350px; + } +} +.pytorch-container:before, .pytorch-container:after { + content: ""; + display: table; +} +.pytorch-container:after { + clear: both; +} +.pytorch-container { + *zoom: 1; +} + +.pytorch-content-wrap { + background-color: #ffffff; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + position: relative; + padding-top: 0; +} +.pytorch-content-wrap:before, .pytorch-content-wrap:after { + content: ""; + display: table; +} +.pytorch-content-wrap:after { + clear: both; +} +.pytorch-content-wrap { + *zoom: 1; +} +@media screen and (min-width: 1101px) { + .pytorch-content-wrap { + padding-top: 45px; + float: left; + width: 100%; + display: block; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-wrap { + width: 100%; + } +} + +.pytorch-content { + background: #ffffff; + width: 100%; + max-width: 700px; + position: relative; +} + +.pytorch-content-left { + min-height: 100vh; + margin-top: 2.5rem; + width: 100%; +} +@media screen and (min-width: 1101px) { + .pytorch-content-left { + margin-top: 0; + margin-left: 3%; + width: 75%; + float: left; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-left { + width: 850px; + margin-left: 30px; + } +} +.pytorch-content-left .main-content { + padding-top: 0.9375rem; +} +.pytorch-content-left .main-content ul.simple { + padding-bottom: 1.25rem; +} +.pytorch-content-left .main-content .note:nth-child(1), .pytorch-content-left .main-content .warning:nth-child(1) { + margin-top: 0; +} + +.pytorch-content-right { + display: none; + position: relative; + overflow-x: hidden; + overflow-y: hidden; +} +@media screen and (min-width: 1101px) { + .pytorch-content-right { + display: block; + margin-left: 0; + width: 19%; + float: left; + height: 100%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-right { + width: 280px; + } +} + +@media screen and (min-width: 1101px) { + .pytorch-side-scroll { + position: relative; + overflow-x: hidden; + overflow-y: scroll; + height: 100%; + } +} + +.pytorch-menu-vertical { + padding: 1.25rem 1.875rem 2.5rem 1.875rem; +} +@media screen and (min-width: 1101px) { + .pytorch-menu-vertical { + display: block; + padding-top: 0; + padding-right: 13.5%; + padding-bottom: 5.625rem; + } +} +@media screen and (min-width: 1600px) { + .pytorch-menu-vertical { + padding-left: 0; + padding-right: 1.5625rem; + } +} + +.pytorch-left-menu { + display: none; + background-color: #f3f4f7; + color: #262626; + overflow: scroll; +} +@media screen and (min-width: 1101px) { + .pytorch-left-menu { + display: block; + overflow-x: hidden; + overflow-y: hidden; + padding-bottom: 110px; + padding: 0 1.875rem 0 0; + width: 25%; + z-index: 200; + float: left; + } + .pytorch-left-menu.make-fixed { + position: fixed; + top: 0; + bottom: 0; + left: 0; + float: none; + } +} +@media screen and (min-width: 1600px) { + .pytorch-left-menu { + padding: 0 0 0 1.875rem; + width: 350px; + } +} + +.expand-menu, .hide-menu { + color: #6c6c6d; + padding-left: 10px; + cursor: pointer; +} + +.collapse { + display: none; +} + +.left-nav-top-caption { + padding-top: 1rem; +} + +.pytorch-left-menu p.caption { + color: #262626; + display: block; + font-size: 1rem; + line-height: 1.375rem; + margin-bottom: 1rem; + text-transform: none; + white-space: normal; +} + +.pytorch-left-menu-search { + margin-bottom: 2.5rem; +} +@media screen and (min-width: 1101px) { + .pytorch-left-menu-search { + margin: 1.25rem 0.625rem 1.875rem 0; + } +} + +.pytorch-left-menu-search 
::-webkit-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search ::-moz-placeholder { + color: #262626; +} +.pytorch-left-menu-search :-ms-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search ::-ms-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search ::placeholder { + color: #262626; +} + +.pytorch-left-menu-search input[type=text] { + border-radius: 0; + padding: 0.5rem 0.75rem; + border-color: #ffffff; + color: #262626; + border-style: solid; + font-size: 1rem; + width: 100%; + background-color: #f3f4f7; + background-image: url("../images/search-icon.svg"); + background-repeat: no-repeat; + background-size: 18px 18px; + background-position: 12px 10px; + padding-left: 40px; + background-color: #ffffff; +} +.pytorch-left-menu-search input[type=text]:focus { + outline: 0; +} + +@media screen and (min-width: 1101px) { + .pytorch-left-menu .pytorch-side-scroll { + width: 120%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-left-menu .pytorch-side-scroll { + width: 340px; + } +} + +.pytorch-right-menu { + min-height: 100px; + overflow-x: hidden; + overflow-y: hidden; + left: 0; + z-index: 200; + padding-top: 0; + position: relative; +} +@media screen and (min-width: 1101px) { + .pytorch-right-menu { + width: 100%; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 83.5%; + width: 14%; + } + .pytorch-right-menu.scrolling-absolute { + position: absolute; + left: 0; + } +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu { + left: 0; + width: 380px; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 1230px; + } + .pytorch-right-menu.scrolling-absolute { + position: absolute; + left: 0; + } +} + +.pytorch-left-menu ul, +.pytorch-right-menu ul { + list-style-type: none; + padding-left: 0; + margin-bottom: 2.5rem; +} +.pytorch-left-menu > ul, +.pytorch-right-menu > ul { + margin-bottom: 2.5rem; +} +.pytorch-left-menu a:link, +.pytorch-left-menu a:visited, +.pytorch-left-menu a:hover, +.pytorch-right-menu a:link, +.pytorch-right-menu a:visited, +.pytorch-right-menu a:hover { + color: #6c6c6d; + font-size: 0.875rem; + line-height: 1rem; + padding: 0; + text-decoration: none; +} +.pytorch-left-menu a:link.reference.internal, +.pytorch-left-menu a:visited.reference.internal, +.pytorch-left-menu a:hover.reference.internal, +.pytorch-right-menu a:link.reference.internal, +.pytorch-right-menu a:visited.reference.internal, +.pytorch-right-menu a:hover.reference.internal { + margin-bottom: 0.3125rem; + position: relative; +} +.pytorch-left-menu li code, +.pytorch-right-menu li code { + border: none; + background: inherit; + color: inherit; + padding-left: 0; + padding-right: 0; +} +.pytorch-left-menu li span.toctree-expand, +.pytorch-right-menu li span.toctree-expand { + display: block; + float: left; + margin-left: -1.2em; + font-size: 0.8em; + line-height: 1.6em; +} +.pytorch-left-menu li.on a, .pytorch-left-menu li.current > a, +.pytorch-right-menu li.on a, +.pytorch-right-menu li.current > a { + position: relative; + border: none; +} +.pytorch-left-menu li.on a span.toctree-expand, .pytorch-left-menu li.current > a span.toctree-expand, +.pytorch-right-menu li.on a span.toctree-expand, +.pytorch-right-menu li.current > a span.toctree-expand { + display: block; + font-size: 0.8em; + line-height: 1.6em; +} +.pytorch-left-menu li.toctree-l1.current > a, +.pytorch-right-menu li.toctree-l1.current > a { + color: #ee4c2c; +} +.pytorch-left-menu li.toctree-l1.current > a:before, 
+.pytorch-right-menu li.toctree-l1.current > a:before { + content: "\2022"; + display: inline-block; + position: absolute; + left: -15px; + top: -10%; + font-size: 1.375rem; + color: #ee4c2c; +} +@media screen and (min-width: 1101px) { + .pytorch-left-menu li.toctree-l1.current > a:before, + .pytorch-right-menu li.toctree-l1.current > a:before { + left: -20px; + } +} +.pytorch-left-menu li.toctree-l1.current li.toctree-l2 > ul, .pytorch-left-menu li.toctree-l2.current li.toctree-l3 > ul, +.pytorch-right-menu li.toctree-l1.current li.toctree-l2 > ul, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3 > ul { + display: none; +} +.pytorch-left-menu li.toctree-l1.current li.toctree-l2.current > ul, .pytorch-left-menu li.toctree-l2.current li.toctree-l3.current > ul, +.pytorch-right-menu li.toctree-l1.current li.toctree-l2.current > ul, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3.current > ul { + display: block; +} +.pytorch-left-menu li.toctree-l2.current li.toctree-l3 > a, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3 > a { + display: block; +} +.pytorch-left-menu li.toctree-l3, +.pytorch-right-menu li.toctree-l3 { + font-size: 0.9em; +} +.pytorch-left-menu li.toctree-l3.current li.toctree-l4 > a, +.pytorch-right-menu li.toctree-l3.current li.toctree-l4 > a { + display: block; +} +.pytorch-left-menu li.toctree-l4, +.pytorch-right-menu li.toctree-l4 { + font-size: 0.9em; +} +.pytorch-left-menu li.current ul, +.pytorch-right-menu li.current ul { + display: block; +} +.pytorch-left-menu li ul, +.pytorch-right-menu li ul { + margin-bottom: 0; + display: none; +} +.pytorch-left-menu li ul li a, +.pytorch-right-menu li ul li a { + margin-bottom: 0; +} +.pytorch-left-menu a, +.pytorch-right-menu a { + display: inline-block; + position: relative; +} +.pytorch-left-menu a:hover, +.pytorch-right-menu a:hover { + cursor: pointer; +} +.pytorch-left-menu a:active, +.pytorch-right-menu a:active { + cursor: pointer; +} + +.pytorch-left-menu ul { + padding-left: 0; +} + +.pytorch-right-menu a:link, +.pytorch-right-menu a:visited, +.pytorch-right-menu a:hover { + color: #6c6c6d; +} +.pytorch-right-menu a:link span.pre, +.pytorch-right-menu a:visited span.pre, +.pytorch-right-menu a:hover span.pre { + color: #6c6c6d; +} +.pytorch-right-menu a.reference.internal.expanded:before { + content: "-"; + font-family: monospace; + position: absolute; + left: -12px; +} +.pytorch-right-menu a.reference.internal.not-expanded:before { + content: "+"; + font-family: monospace; + position: absolute; + left: -12px; +} +.pytorch-right-menu li.active > a { + color: #ee4c2c; +} +.pytorch-right-menu li.active > a span.pre, .pytorch-right-menu li.active > a:before { + color: #ee4c2c; +} +.pytorch-right-menu li.active > a:after { + content: "\2022"; + color: #e44c2c; + display: inline-block; + font-size: 1.375rem; + left: -17px; + position: absolute; + top: 1px; +} +.pytorch-right-menu .pytorch-side-scroll > ul > li > ul > li { + margin-bottom: 0; +} +.pytorch-right-menu ul ul { + padding-left: 0; +} +.pytorch-right-menu ul ul li { + padding-left: 0px; +} +.pytorch-right-menu ul ul li a.reference.internal { + padding-left: 0; +} +.pytorch-right-menu ul ul li ul { + display: none; + padding-left: 10px; +} +.pytorch-right-menu ul ul li li a.reference.internal { + padding-left: 0; +} +.pytorch-right-menu li ul { + display: block; +} + +.pytorch-right-menu .pytorch-side-scroll { + padding-top: 20px; +} +@media screen and (min-width: 1101px) { + .pytorch-right-menu .pytorch-side-scroll { + width: 120%; + } 
+} +@media screen and (min-width: 1600px) { + .pytorch-right-menu .pytorch-side-scroll { + width: 400px; + } +} +.pytorch-right-menu .pytorch-side-scroll > ul { + padding-left: 10%; + padding-right: 10%; + margin-bottom: 0; +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu .pytorch-side-scroll > ul { + padding-left: 25px; + } +} +.pytorch-right-menu .pytorch-side-scroll > ul > li > a.reference.internal { + color: #262626; + font-weight: 500; +} +.pytorch-right-menu .pytorch-side-scroll ul li { + position: relative; +} + +.header-container { + max-width: none; + margin-top: 4px; +} +@media screen and (min-width: 1101px) { + .header-container { + margin-top: 0; + } +} +@media screen and (min-width: 1600px) { + .header-container { + margin-top: 0; + } +} + +.container-fluid.header-holder { + padding-right: 0; + padding-left: 0; +} + +.header-holder .container { + max-width: none; + padding-right: 1.875rem; + padding-left: 1.875rem; +} +@media screen and (min-width: 1101px) { + .header-holder .container { + padding-right: 1.875rem; + padding-left: 1.875rem; + } +} + +.header-holder .main-menu { + -webkit-box-pack: unset; + -ms-flex-pack: unset; + justify-content: unset; + position: relative; +} +@media screen and (min-width: 1101px) { + .header-holder .main-menu ul { + padding-left: 0; + margin-left: 26%; + } +} +@media screen and (min-width: 1600px) { + .header-holder .main-menu ul { + padding-left: 38px; + margin-left: 310px; + } +} + +.pytorch-page-level-bar { + display: none; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + background-color: #ffffff; + border-bottom: 1px solid #e2e2e2; + width: 100%; + z-index: 201; +} +@media screen and (min-width: 1101px) { + .pytorch-page-level-bar { + left: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 45px; + padding-left: 0; + width: 100%; + position: absolute; + z-index: 1; + } + .pytorch-page-level-bar.left-menu-is-fixed { + position: fixed; + top: 0; + left: 25%; + padding-left: 0; + right: 0; + width: 75%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-page-level-bar { + left: 0; + right: 0; + width: auto; + z-index: 1; + } + .pytorch-page-level-bar.left-menu-is-fixed { + left: 350px; + right: 0; + width: auto; + } +} +.pytorch-page-level-bar ul, .pytorch-page-level-bar li { + margin: 0; +} + +.pytorch-shortcuts-wrapper { + display: none; +} +@media screen and (min-width: 1101px) { + .pytorch-shortcuts-wrapper { + font-size: 0.875rem; + float: left; + margin-left: 2%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-shortcuts-wrapper { + margin-left: 1.875rem; + } +} + +.cookie-banner-wrapper { + display: none; +} +.cookie-banner-wrapper .container { + padding-left: 1.875rem; + padding-right: 1.875rem; + max-width: 1240px; +} +.cookie-banner-wrapper.is-visible { + display: block; + position: fixed; + bottom: 0; + background-color: #f3f4f7; + min-height: 100px; + width: 100%; + z-index: 401; + border-top: 3px solid #ededee; +} +.cookie-banner-wrapper .gdpr-notice { + color: #6c6c6d; + margin-top: 1.5625rem; + text-align: left; + max-width: 1440px; +} +@media screen and (min-width: 768px) { + .cookie-banner-wrapper .gdpr-notice { + width: 77%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .cookie-banner-wrapper .gdpr-notice { + width: inherit; + } +} +.cookie-banner-wrapper .gdpr-notice .cookie-policy-link { + color: #343434; +} +.cookie-banner-wrapper .close-button { + -webkit-appearance: none; + -moz-appearance: none; + appearance: 
none; + background: transparent; + border: 1px solid #f3f4f7; + height: 1.3125rem; + position: absolute; + bottom: 42px; + right: 0; + top: 0; + cursor: pointer; + outline: none; +} +@media screen and (min-width: 768px) { + .cookie-banner-wrapper .close-button { + right: 20%; + top: inherit; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .cookie-banner-wrapper .close-button { + right: 0; + top: 0; + } +} + +.main-menu ul li .resources-dropdown a { + cursor: pointer; +} +.main-menu ul li .dropdown-menu { + border-radius: 0; + padding: 0; +} +.main-menu ul li .dropdown-menu .dropdown-item { + color: #6c6c6d; + border-bottom: 1px solid #e2e2e2; +} +.main-menu ul li .dropdown-menu .dropdown-item:last-of-type { + border-bottom-color: transparent; +} +.main-menu ul li .dropdown-menu .dropdown-item:hover { + background-color: #e44c2c; +} +.main-menu ul li .dropdown-menu .dropdown-item p { + font-size: 1rem; + color: #979797; +} +.main-menu ul li .dropdown-menu a.dropdown-item:hover { + color: #ffffff; +} +.main-menu ul li .dropdown-menu a.dropdown-item:hover p { + color: #ffffff; +} + +.resources-dropdown-menu { + left: -75px; + width: 226px; + display: none; + position: absolute; + z-index: 1000; + display: none; + float: left; + min-width: 10rem; + padding: 0.5rem 0; + font-size: 1rem; + color: #212529; + text-align: left; + list-style: none; + background-color: #ffffff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 0.25rem; +} + +.resources-dropdown:hover .resources-dropdown-menu { + display: block; +} + +.main-menu ul li .resources-dropdown-menu { + border-radius: 0; + padding: 0; +} +.main-menu ul li.active:hover .resources-dropdown-menu { + display: block; +} + +.main-menu ul li .resources-dropdown-menu .dropdown-item { + color: #6c6c6d; + border-bottom: 1px solid #e2e2e2; +} + +.resources-dropdown .with-down-orange-arrow { + padding-right: 2rem; + position: relative; + background: url("../images/chevron-down-orange.svg"); + background-size: 14px 18px; + background-position: top 7px right 10px; + background-repeat: no-repeat; +} + +.with-down-arrow { + padding-right: 2rem; + position: relative; + background-image: url("../images/chevron-down-black.svg"); + background-size: 14px 18px; + background-position: top 7px right 10px; + background-repeat: no-repeat; +} +.with-down-arrow:hover { + background-image: url("../images/chevron-down-orange.svg"); + background-repeat: no-repeat; +} + +.header-holder .main-menu ul li .resources-dropdown .doc-dropdown-option { + padding-top: 1rem; +} + +.header-holder .main-menu ul li a.nav-dropdown-item { + display: block; + font-size: 1rem; + line-height: 1.3125rem; + width: 100%; + padding: 0.25rem 1.5rem; + clear: both; + font-weight: 400; + color: #979797; + text-align: center; + background-color: transparent; + border-bottom: 1px solid #e2e2e2; +} +.header-holder .main-menu ul li a.nav-dropdown-item:last-of-type { + border-bottom-color: transparent; +} +.header-holder .main-menu ul li a.nav-dropdown-item:hover { + background-color: #e44c2c; + color: white; +} +.header-holder .main-menu ul li a.nav-dropdown-item .dropdown-title { + font-size: 1.125rem; + color: #6c6c6d; + letter-spacing: 0; + line-height: 34px; +} + +.header-holder .main-menu ul li a.nav-dropdown-item:hover .dropdown-title { + background-color: #e44c2c; + color: white; +} + +/*# sourceMappingURL=theme.css.map */ \ No newline at end of file diff --git a/0.9.0/_static/doctools.js b/0.9.0/_static/doctools.js new file mode 100644 
index 0000000000..daccd209da --- /dev/null +++ b/0.9.0/_static/doctools.js @@ -0,0 +1,315 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { + this.initOnKeyListeners(); + } + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? 
singular : plural; + return translated[Documentation.PLURALEXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keydown(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' + && !event.altKey && !event.ctrlKey && !event.metaKey && !event.shiftKey) { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + 
+$(document).ready(function() { + Documentation.init(); +}); diff --git a/0.9.0/_static/documentation_options.js b/0.9.0/_static/documentation_options.js new file mode 100644 index 0000000000..a540135d87 --- /dev/null +++ b/0.9.0/_static/documentation_options.js @@ -0,0 +1,11 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '0.9.0a0+33b2469', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: true +}; \ No newline at end of file diff --git a/0.9.0/_static/file.png b/0.9.0/_static/file.png new file mode 100644 index 0000000000..a858a410e4 Binary files /dev/null and b/0.9.0/_static/file.png differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff new file mode 100644 index 0000000000..e317248423 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 new file mode 100644 index 0000000000..cec2dc94fb Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-bold.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-bold.woff new file mode 100644 index 0000000000..de46625edf Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-bold.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-bold.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-bold.woff2 new file mode 100644 index 0000000000..dc05cd82bc Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-bold.woff2 differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-book-italic.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-book-italic.woff new file mode 100644 index 0000000000..a50e5038a4 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-book-italic.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-book-italic.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-book-italic.woff2 new file mode 100644 index 0000000000..fe284db661 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-book-italic.woff2 differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-book.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-book.woff new file mode 100644 index 0000000000..6ab8775f00 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-book.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-book.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-book.woff2 new file mode 100644 index 0000000000..2688739f1f Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-book.woff2 differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-light-italic.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-light-italic.woff new file mode 100644 index 0000000000..beda58d4e2 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-light-italic.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-light-italic.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-light-italic.woff2 new file mode 100644 index 0000000000..e2fa0134b1 Binary files /dev/null and 
b/0.9.0/_static/fonts/FreightSans/freight-sans-light-italic.woff2 differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-light.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-light.woff new file mode 100644 index 0000000000..226a0bf835 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-light.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-light.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-light.woff2 new file mode 100644 index 0000000000..6d8ff2c045 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-light.woff2 differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff new file mode 100644 index 0000000000..a42115d63b Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 new file mode 100644 index 0000000000..16a7713a45 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-medium.woff b/0.9.0/_static/fonts/FreightSans/freight-sans-medium.woff new file mode 100644 index 0000000000..5ea34539c6 Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-medium.woff differ diff --git a/0.9.0/_static/fonts/FreightSans/freight-sans-medium.woff2 b/0.9.0/_static/fonts/FreightSans/freight-sans-medium.woff2 new file mode 100644 index 0000000000..c58b6a528b Binary files /dev/null and b/0.9.0/_static/fonts/FreightSans/freight-sans-medium.woff2 differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff new file mode 100644 index 0000000000..cf37a5c50b Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 new file mode 100644 index 0000000000..955a6eab5b Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff new file mode 100644 index 0000000000..fc65a679c2 Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 new file mode 100644 index 0000000000..c352e40e34 Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff new file mode 100644 index 0000000000..7d63d89f24 Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 new file mode 100644 index 0000000000..d0d7ded907 Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff new file mode 100644 index 0000000000..1da7753cf2 
Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff differ diff --git a/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 new file mode 100644 index 0000000000..79dffdb85f Binary files /dev/null and b/0.9.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 differ diff --git a/0.9.0/_static/images/arrow-down-orange.svg b/0.9.0/_static/images/arrow-down-orange.svg new file mode 100644 index 0000000000..e9d8e9ecf2 --- /dev/null +++ b/0.9.0/_static/images/arrow-down-orange.svg @@ -0,0 +1,19 @@ + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.9.0/_static/images/arrow-right-with-tail.svg b/0.9.0/_static/images/arrow-right-with-tail.svg new file mode 100644 index 0000000000..5843588fca --- /dev/null +++ b/0.9.0/_static/images/arrow-right-with-tail.svg @@ -0,0 +1,19 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.9.0/_static/images/chevron-down-black.svg b/0.9.0/_static/images/chevron-down-black.svg new file mode 100644 index 0000000000..097bc076ec --- /dev/null +++ b/0.9.0/_static/images/chevron-down-black.svg @@ -0,0 +1,16 @@ + + + Created with Sketch. + + + + + + + + + + + + + diff --git a/0.9.0/_static/images/chevron-down-grey.svg b/0.9.0/_static/images/chevron-down-grey.svg new file mode 100644 index 0000000000..82d6514f25 --- /dev/null +++ b/0.9.0/_static/images/chevron-down-grey.svg @@ -0,0 +1,18 @@ + + + + +Created with Sketch. + + + + + + + + + + + + diff --git a/0.9.0/_static/images/chevron-down-orange.svg b/0.9.0/_static/images/chevron-down-orange.svg new file mode 100644 index 0000000000..fd79a57854 --- /dev/null +++ b/0.9.0/_static/images/chevron-down-orange.svg @@ -0,0 +1,16 @@ + + + Created with Sketch. + + + + + + + + + + + + + diff --git a/0.9.0/_static/images/chevron-down-white.svg b/0.9.0/_static/images/chevron-down-white.svg new file mode 100644 index 0000000000..e6c94e27b6 --- /dev/null +++ b/0.9.0/_static/images/chevron-down-white.svg @@ -0,0 +1,16 @@ + + + Created with Sketch. + + + + + + + + + + + + + diff --git a/0.9.0/_static/images/chevron-right-orange.svg b/0.9.0/_static/images/chevron-right-orange.svg new file mode 100644 index 0000000000..7033fc93bf --- /dev/null +++ b/0.9.0/_static/images/chevron-right-orange.svg @@ -0,0 +1,17 @@ + + + + +Page 1 +Created with Sketch. + + + + + + + + + + diff --git a/0.9.0/_static/images/chevron-right-white.svg b/0.9.0/_static/images/chevron-right-white.svg new file mode 100644 index 0000000000..dd9e77f261 --- /dev/null +++ b/0.9.0/_static/images/chevron-right-white.svg @@ -0,0 +1,17 @@ + + + + +Page 1 +Created with Sketch. + + + + + + + + + + \ No newline at end of file diff --git a/0.9.0/_static/images/home-footer-background.jpg b/0.9.0/_static/images/home-footer-background.jpg new file mode 100644 index 0000000000..b307bb57f4 Binary files /dev/null and b/0.9.0/_static/images/home-footer-background.jpg differ diff --git a/0.9.0/_static/images/icon-close.svg b/0.9.0/_static/images/icon-close.svg new file mode 100644 index 0000000000..348964e79f --- /dev/null +++ b/0.9.0/_static/images/icon-close.svg @@ -0,0 +1,21 @@ + + + + Page 1 + Created with Sketch. 
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.9.0/_static/images/icon-menu-dots-dark.svg b/0.9.0/_static/images/icon-menu-dots-dark.svg new file mode 100644 index 0000000000..fa2ad044b3 --- /dev/null +++ b/0.9.0/_static/images/icon-menu-dots-dark.svg @@ -0,0 +1,42 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/0.9.0/_static/images/logo-dark.svg b/0.9.0/_static/images/logo-dark.svg new file mode 100644 index 0000000000..9b4c1a56ac --- /dev/null +++ b/0.9.0/_static/images/logo-dark.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/0.9.0/_static/images/logo-facebook-dark.svg b/0.9.0/_static/images/logo-facebook-dark.svg new file mode 100644 index 0000000000..cff17915c4 --- /dev/null +++ b/0.9.0/_static/images/logo-facebook-dark.svg @@ -0,0 +1,8 @@ + + + + + + diff --git a/0.9.0/_static/images/logo-icon.svg b/0.9.0/_static/images/logo-icon.svg new file mode 100644 index 0000000000..575f6823e4 --- /dev/null +++ b/0.9.0/_static/images/logo-icon.svg @@ -0,0 +1,12 @@ + + + + + + + + + diff --git a/0.9.0/_static/images/logo-twitter-dark.svg b/0.9.0/_static/images/logo-twitter-dark.svg new file mode 100644 index 0000000000..1572570f88 --- /dev/null +++ b/0.9.0/_static/images/logo-twitter-dark.svg @@ -0,0 +1,16 @@ + + + + + + + + diff --git a/0.9.0/_static/images/logo-youtube-dark.svg b/0.9.0/_static/images/logo-youtube-dark.svg new file mode 100644 index 0000000000..e3cfedd79d --- /dev/null +++ b/0.9.0/_static/images/logo-youtube-dark.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/0.9.0/_static/images/logo.svg b/0.9.0/_static/images/logo.svg new file mode 100644 index 0000000000..f8d44b9842 --- /dev/null +++ b/0.9.0/_static/images/logo.svg @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/0.9.0/_static/images/pytorch-colab.svg b/0.9.0/_static/images/pytorch-colab.svg new file mode 100644 index 0000000000..2ab15e2f30 --- /dev/null +++ b/0.9.0/_static/images/pytorch-colab.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + diff --git a/0.9.0/_static/images/pytorch-download.svg b/0.9.0/_static/images/pytorch-download.svg new file mode 100644 index 0000000000..cc37d638e9 --- /dev/null +++ b/0.9.0/_static/images/pytorch-download.svg @@ -0,0 +1,10 @@ + + + + + + diff --git a/0.9.0/_static/images/pytorch-github.svg b/0.9.0/_static/images/pytorch-github.svg new file mode 100644 index 0000000000..2c2570da1d --- /dev/null +++ b/0.9.0/_static/images/pytorch-github.svg @@ -0,0 +1,15 @@ + + + + + + diff --git a/0.9.0/_static/images/pytorch-x.svg b/0.9.0/_static/images/pytorch-x.svg new file mode 100644 index 0000000000..74856ea9fd --- /dev/null +++ b/0.9.0/_static/images/pytorch-x.svg @@ -0,0 +1,10 @@ + + + + + + + diff --git a/0.9.0/_static/images/search-icon.svg b/0.9.0/_static/images/search-icon.svg new file mode 100644 index 0000000000..ebb0df8677 --- /dev/null +++ b/0.9.0/_static/images/search-icon.svg @@ -0,0 +1,19 @@ + + + + Created with Sketch. 
+ + + + + + + + + + + + + + + diff --git a/0.9.0/_static/images/view-page-source-icon.svg b/0.9.0/_static/images/view-page-source-icon.svg new file mode 100644 index 0000000000..6f5bbe0748 --- /dev/null +++ b/0.9.0/_static/images/view-page-source-icon.svg @@ -0,0 +1,13 @@ + + + + + + + + + + diff --git a/0.9.0/_static/img/pytorch-logo-dark.png b/0.9.0/_static/img/pytorch-logo-dark.png new file mode 100644 index 0000000000..b7a1ceb964 Binary files /dev/null and b/0.9.0/_static/img/pytorch-logo-dark.png differ diff --git a/0.9.0/_static/img/pytorch-logo-dark.svg b/0.9.0/_static/img/pytorch-logo-dark.svg new file mode 100644 index 0000000000..5e53000385 --- /dev/null +++ b/0.9.0/_static/img/pytorch-logo-dark.svg @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/0.9.0/_static/img/pytorch-logo-flame.png b/0.9.0/_static/img/pytorch-logo-flame.png new file mode 100644 index 0000000000..bad49bf30b Binary files /dev/null and b/0.9.0/_static/img/pytorch-logo-flame.png differ diff --git a/0.9.0/_static/jquery-3.4.1.js b/0.9.0/_static/jquery-3.4.1.js new file mode 100644 index 0000000000..773ad95c56 --- /dev/null +++ b/0.9.0/_static/jquery-3.4.1.js @@ -0,0 +1,10598 @@ +/*! + * jQuery JavaScript Library v3.4.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2019-05-01T21:04Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. 
+ return typeof obj === "function" && typeof obj.nodeType !== "number"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.4.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script 
in a global context + globalEval: function( code, options ) { + DOMEval( code, { nonce: options && options.nonce } ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.4 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2019-04-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) && + + // Support: IE 8 only + // Exclude object elements + (nodeType !== 1 || context.nodeName.toLowerCase() !== "object") ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // Thanks to Andrew Dupont for this technique. 
+ if ( nodeType === 1 && rdescend.test( selector ) ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem.namespaceURI, + docElem = (elem.ownerDocument || elem).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( tag );
+
+ // DocumentFragment nodes don't have gEBTN
+ } else if ( support.qsa ) {
+ return context.querySelectorAll( tag );
+ }
+ } :
+
+ function( tag, context ) {
+ var elem,
+ tmp = [],
+ i = 0,
+ // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+ results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ while ( (elem = results[i++]) ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ };
+
+ // Class
+ Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
+ if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+ return context.getElementsByClassName( className );
+ }
+ };
+
+ /* QSA/matchesSelector
+ ---------------------------------------------------------------------- */
+
+ // QSA and matchesSelector support
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ rbuggyMatches = [];
+
+ // qSa(:focus) reports false when true (Chrome 21)
+ // We allow this because of a bug in IE8/9 that throws an error
+ // whenever `document.activeElement` is accessed on an iframe
+ // So, we allow :focus to pass through QSA all the time to avoid the IE error
+ // See https://bugs.jquery.com/ticket/13378
+ rbuggyQSA = [];
+
+ if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert(function( el ) {
+ // Select is set to empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // https://bugs.jquery.com/ticket/12359
+ docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+ "<select id='" + expando + "-\r\\' msallowcapture=''>" +
+ "<option selected=''></option></select>";
+
+ // Support: IE8, Opera 11-12.16
+ // Nothing should be selected when empty strings follow ^= or $= or *=
+ // The test attribute must be unknown in Opera but "safe" for WinRT
+ // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+ if ( el.querySelectorAll("[msallowcapture^='']").length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+ }
+
+ // Support: IE8
+ // Boolean attributes and "value" are not treated correctly
+ if ( !el.querySelectorAll("[selected]").length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+ }
+
+ // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+ rbuggyQSA.push("~=");
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here and will not see later tests
+ if ( !el.querySelectorAll(":checked").length ) {
+ rbuggyQSA.push(":checked");
+ }
+
+ // Support: Safari 8+, iOS 8+
+ // https://bugs.webkit.org/show_bug.cgi?id=136851
+ // In-page `selector#id sibling-combinator selector` fails
+ if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+ rbuggyQSA.push(".#.+[+~]");
+ }
+ });
+
+ assert(function( el ) {
+ el.innerHTML = "<a href='' disabled='disabled'></a>" +
+ "<select disabled='disabled'><option/></select>";
+
+ // Support: Windows 8 Native Apps
+ // The type and name attributes are restricted during .innerHTML assignment
+ var input = document.createElement("input");
+ input.setAttribute( "type", "hidden" );
+ el.appendChild( input ).setAttribute( "name", "D" );
+
+ // Support: IE8
+ 
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert(function( el ) { + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; +}); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute("href") === "#" ; +}) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + }); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert(function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +}) ) { + addHandle( "value", function( elem, name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + }); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert(function( el ) { + return el.getAttribute("disabled") == null; +}) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? 
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( typeof elem.contentDocument !== "undefined" ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); + +var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); + + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // Support: IE <=9 only + option: [ 1, "" ], + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must 
use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+} )();
+
+
+var
+	rkeyEvent = /^key/,
+	rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
+	rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// Support: IE <=9 - 11+
+// focus() and blur() are asynchronous, except when they are no-op.
+// So expect focus to be synchronous when the element is already active,
+// and blur to be synchronous when the element is not already active.
+// (focus and blur are always synchronous in other supported browsers,
+// this just defines when we can count on it).
+function expectSync( elem, type ) {
+	return ( elem === safeActiveElement() ) === ( type === "focus" );
+}
+
+// Support: IE <=9 only
+// Accessing document.activeElement can throw unexpectedly
+// https://bugs.jquery.com/ticket/13393
+function safeActiveElement() {
+	try {
+		return document.activeElement;
+	} catch ( err ) { }
+}
+
+function on( elem, types, selector, data, fn, one ) {
+	var origFn, type;
+
+	// Types can be a map of types/handlers
+	if ( typeof types === "object" ) {
+
+		// ( types-Object, selector, data )
+		if ( typeof selector !== "string" ) {
+
+			// ( types-Object, data )
+			data = data || selector;
+			selector = undefined;
+		}
+		for ( type in types ) {
+			on( elem, type, selector, data, types[ type ], one );
+		}
+		return elem;
+	}
+
+	if ( data == null && fn == null ) {
+
+		// ( types, fn )
+		fn = selector;
+		data = selector = undefined;
+	} else if ( fn == null ) {
+		if ( typeof selector === "string" ) {
+
+			// ( types, selector, fn )
+			fn = data;
+			data = undefined;
+		} else {
+
+			// ( types, data, fn )
+			fn = data;
+			data = selector;
+			selector = undefined;
+		}
+	}
+	if ( fn === false ) {
+		fn = returnFalse;
+	} else if ( !fn ) {
+		return elem;
+	}
+
+	if ( one === 1 ) {
+		origFn = fn;
+		fn = function( event ) {
+
+			// Can use an empty set, since event contains the info
+			jQuery().off( event );
+			return origFn.apply( this, arguments );
+		};
+
+		// Use same guid so caller can remove using origFn
+		fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+	}
+	return elem.each( function() {
+		jQuery.event.add( this, types, fn, data, selector );
+	} );
+}
+
+/*
+ * Helper functions for managing events -- not part of the public interface.
+ * Props to Dean Edwards' addEvent library for many of the ideas.
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. 
+ saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + return result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. 
+
+// Do the same for pointerenter/pointerleave and pointerover/pointerout
+//
+// Support: Safari 7 only
+// Safari sends mouseenter too often; see:
+// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
+// for the description of the bug (it existed in older Chrome versions as well).
+jQuery.each( {
+	mouseenter: "mouseover",
+	mouseleave: "mouseout",
+	pointerenter: "pointerover",
+	pointerleave: "pointerout"
+}, function( orig, fix ) {
+	jQuery.event.special[ orig ] = {
+		delegateType: fix,
+		bindType: fix,
+
+		handle: function( event ) {
+			var ret,
+				target = this,
+				related = event.relatedTarget,
+				handleObj = event.handleObj;
+
+			// For mouseenter/leave call the handler if related is outside the target.
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event ) dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	/* eslint-disable max-len */
+
+	// See https://github.com/eslint/eslint/issues/3229
+	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,
+
+	/* eslint-enable */
+
+	// Support: IE <=10 - 11, Edge 12 - 13 only
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+		elem.type = elem.type.slice( 5 );
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+ if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+			for ( ; i < l; i++ ) {
+				node = fragment;
+
+				if ( i !== iNoClone ) {
+					node = jQuery.clone( node, true, true );
+
+					// Keep references to cloned scripts for later restoration
+					if ( hasScripts ) {
+
+						// Support: Android <=4.0 only, PhantomJS 1 only
+						// push.apply(_, arraylike) throws on ancient WebKit
+						jQuery.merge( scripts, getAll( node, "script" ) );
+					}
+				}
+
+				callback.call( collection[ i ], node, i );
+			}
+
+			if ( hasScripts ) {
+				doc = scripts[ scripts.length - 1 ].ownerDocument;
+
+				// Reenable scripts
+				jQuery.map( scripts, restoreScript );
+
+				// Evaluate executable scripts on first document insertion
+				for ( i = 0; i < hasScripts; i++ ) {
+					node = scripts[ i ];
+					if ( rscriptType.test( node.type || "" ) &&
+						!dataPriv.access( node, "globalEval" ) &&
+						jQuery.contains( doc, node ) ) {
+
+						if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) {
+
+							// Optional AJAX dependency, but won't run scripts if not present
+							if ( jQuery._evalUrl && !node.noModule ) {
+								jQuery._evalUrl( node.src, {
+									nonce: node.nonce || node.getAttribute( "nonce" )
+								} );
+							}
+						} else {
+							DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc );
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return collection;
+}
+
+function remove( elem, selector, keepData ) {
+	var node,
+		nodes = selector ? jQuery.filter( selector, elem ) : elem,
+		i = 0;
+
+	for ( ; ( node = nodes[ i ] ) != null; i++ ) {
+		if ( !keepData && node.nodeType === 1 ) {
+			jQuery.cleanData( getAll( node ) );
+		}
+
+		if ( node.parentNode ) {
+			if ( keepData && isAttached( node ) ) {
+				setGlobalEval( getAll( node, "script" ) );
+			}
+			node.parentNode.removeChild( node );
+		}
+	}
+
+	return elem;
+}
+
+jQuery.extend( {
+	htmlPrefilter: function( html ) {
+		return html.replace( rxhtmlTag, "<$1></$2>" );
+	},
+
+	clone: function( elem, dataAndEvents, deepDataAndEvents ) {
+		var i, l, srcElements, destElements,
+			clone = elem.cloneNode( true ),
+			inPage = isAttached( elem );
+
+		// Fix IE cloning issues
+		if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
+				!jQuery.isXMLDoc( elem ) ) {
+
+			// We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
+			destElements = getAll( clone );
+			srcElements = getAll( elem );
+
+			for ( i = 0, l = srcElements.length; i < l; i++ ) {
+				fixInput( srcElements[ i ], destElements[ i ] );
+			}
+		}
+
+		// Copy the events from the original to the clone
+		if ( dataAndEvents ) {
+			if ( deepDataAndEvents ) {
+				srcElements = srcElements || getAll( elem );
+				destElements = destElements || getAll( clone );
+
+				for ( i = 0, l = srcElements.length; i < l; i++ ) {
+					cloneCopyEvent( srcElements[ i ], destElements[ i ] );
+				}
+			} else {
+				cloneCopyEvent( elem, clone );
+			}
+		}
+
+		// Preserve script evaluation history
+		destElements = getAll( clone, "script" );
+		if ( destElements.length > 0 ) {
+			setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
+		}
+
+		// Return the cloned set
+		return clone;
+	},
+
+	cleanData: function( elems ) {
+		var data, elem, type,
+			special = jQuery.event.special,
+			i = 0;
+
+		for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
+			if ( acceptData( elem ) ) {
+				if ( ( data = elem[ dataPriv.expando ] ) ) {
+					if ( data.events ) {
+						for ( type in data.events ) {
+							if ( special[ type ] ) {
+								jQuery.event.remove( elem, type );
+
+							// This is a shortcut to avoid jQuery.event.remove's overhead
+							} else {
+								jQuery.removeEvent( elem, type, data.handle );
+							}
+						}
+					}
+
+					// Support: Chrome <=35 - 45+
+					// Assign undefined instead of using
delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + // Support: IE 9-11 only + // Also use offsetWidth/offsetHeight for when box sizing is unreliable + // We use getClientRects() to check for hidden/disconnected. + // In those cases, the computed value can be trusted to be border-box + if ( ( !support.boxSizingReliable() && isBorderBox || + val === "auto" || + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. 
+ // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? 
+ boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = Date.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } 
+ } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If 
prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url, options ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.backend

+
+

Overview

+

torchaudio.backend module provides implementations for audio file I/O functionalities, which are torchaudio.info, torchaudio.load, and torchaudio.save.

+

There are currently four implementations available.

+ +
+

Note

+

Instead of calling functions in torchaudio.backend directly, please use torchaudio.info, torchaudio.load, and torchaudio.save with the proper backend set via torchaudio.set_audio_backend().

+
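For example, a minimal sketch of this pattern (the file names foo.wav and copy.wav are hypothetical):

import torchaudio

torchaudio.set_audio_backend("sox_io")               # or "soundfile"
metadata = torchaudio.info("foo.wav")                # query metadata
waveform, sample_rate = torchaudio.load("foo.wav")   # load audio
torchaudio.save("copy.wav", waveform, sample_rate)   # save audio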
+
+

Availability

+

"sox_io" backend requires C++ extension module, which is included in Linux/macOS binary distributions. This backend is not available on Windows.

+

"soundfile" backend requires SoundFile. Please refer to the SoundFile documentation for the installation.

+
+
+
+

Common Data Structure

+

Structures used to report the metadata of audio files.

+
+

AudioMetaData

+
+
+class torchaudio.backend.common.AudioMetaData(sample_rate: int, num_frames: int, num_channels: int, bits_per_sample: int, encoding: str)[source]
+

Return type of torchaudio.info function.

+

This class is used by the “sox_io” backend and the “soundfile” backend with the new interface.

+
+
Variables
+
    +
  • sample_rate (int) – Sample rate

  • +
  • num_frames (int) – The number of frames

  • +
  • num_channels (int) – The number of channels

  • +
  • bits_per_sample (int) – The number of bits per sample. This is 0 for lossy formats, +or when it cannot be accurately inferred.

  • +
  • encoding (str) –

    Audio encoding +The values encoding can take are one of the following:

    +
    +
      +
    • PCM_S: Signed integer linear PCM

    • +
    • PCM_U: Unsigned integer linear PCM

    • +
    • PCM_F: Floating point linear PCM

    • +
    • FLAC: Flac, Free Lossless Audio Codec

    • +
    • ULAW: Mu-law

    • +
    • ALAW: A-law

    • +
    • MP3 : MP3, MPEG-1 Audio Layer III

    • +
    • VORBIS: OGG Vorbis

    • +
  • AMR_WB: Adaptive Multi-Rate Wideband

    • +
  • AMR_NB: Adaptive Multi-Rate Narrowband

    • +
    • OPUS: Opus

    • +
    • UNKNOWN : None of above

    • +
    +
    +

  • +
+
+
+
+ +
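As a sketch of how these attributes are typically consumed (the file name is hypothetical), the duration in seconds can be derived from num_frames and sample_rate:

metadata = torchaudio.info("foo.wav")                      # returns AudioMetaData
duration = metadata.num_frames / metadata.sample_rate      # length in seconds
print(metadata.num_channels, metadata.bits_per_sample, metadata.encoding)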
+
+
+

Sox IO Backend

+

The "sox_io" backend is available and default on Linux/macOS and not available on Windows.

+

I/O functions of this backend support TorchScript.

+

You can switch from another backend to the sox_io backend with the following:

+
torchaudio.set_audio_backend("sox_io")
+
+
+
+

info

+
+
+torchaudio.backend.sox_io_backend.info(filepath: str, format: Optional[str] = None) → torchaudio.backend.common.AudioMetaData[source]
+

Get signal information of an audio file.

+
+
Parameters
+
    +
  • filepath (path-like object or file-like object) –

    Source of audio data. When the function is not compiled by TorchScript, +(e.g. torch.jit.script), the following types are accepted;

    +
    +
      +
    • path-like: file path

    • +
    • file-like: Object with read(size: int) -> bytes method, +which returns byte string of at most size length.

    • +
    +
    +

    When the function is compiled by TorchScript, only str type is allowed.

    +
    +

    Note

    +
      +
    • When the input type is file-like object, this function cannot +get the correct length (num_samples) for certain formats, +such as mp3 and vorbis. +In this case, the value of num_samples is 0.

    • +
    • This argument is intentionally annotated as str only due to +TorchScript compiler compatibility.

    • +
    +
    +

  • +
  • format (str, optional) – Override the format detection with the given format. Providing the argument might help when libsox cannot infer the format from the header or extension.

  • +
+
+
Returns
+

Metadata of the given audio.

+
+
Return type
+

AudioMetaData

+
+
+
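A minimal sketch of querying metadata from a file-like object with a format hint (the file name is hypothetical; as noted above, num_frames may be reported as 0 for mp3/vorbis in this case):

from torchaudio.backend import sox_io_backend

with open("foo.mp3", "rb") as f:                          # any object with read(size) -> bytes
    metadata = sox_io_backend.info(f, format="mp3")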
+ +
+
+

load

+
+
+torchaudio.backend.sox_io_backend.load(filepath: str, frame_offset: int = 0, num_frames: int = -1, normalize: bool = True, channels_first: bool = True, format: Optional[str] = None) → Tuple[torch.Tensor, int][source]
+

Load audio data from file.

+
+

Note

+

This function can handle all the codecs that underlying libsox can handle, +however it is tested on the following formats;

+
    +
  • WAV, AMB

    +
    +
      +
    • 32-bit floating-point

    • +
    • 32-bit signed integer

    • +
    • 24-bit signed integer

    • +
    • 16-bit signed integer

    • +
    • 8-bit unsigned integer (WAV only)

    • +
    +
    +
  • +
  • MP3

  • +
  • FLAC

  • +
  • OGG/VORBIS

  • +
  • OPUS

  • +
  • SPHERE

  • +
  • AMR-NB

  • +
+

To load MP3, FLAC, OGG/VORBIS, OPUS and other codecs libsox does not +handle natively, your installation of torchaudio has to be linked to libsox +and corresponding codec libraries such as libmad or libmp3lame etc.

+
+

By default (normalize=True, channels_first=True), this function returns Tensor with +float32 dtype and the shape of [channel, time]. +The samples are normalized to fit in the range of [-1.0, 1.0].

+

When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit +signed integer, 24-bit signed integer, and 8-bit unsigned integer, by providing normalize=False, +this function can return integer Tensor, where the samples are expressed within the whole range +of the corresponding dtype, that is, int32 tensor for 32-bit signed PCM, +int16 for 16-bit signed PCM and uint8 for 8-bit unsigned PCM. Since torch does not +support int24 dtype, 24-bit signed PCM are converted to int32 tensors.

+

normalize parameter has no effect on 32-bit floating-point WAV and other formats, such as +flac and mp3. +For these formats, this function always returns float32 Tensor with values normalized to +[-1.0, 1.0].

+
+
Parameters
+
    +
  • filepath (path-like object or file-like object) –

    Source of audio data. When the function is not compiled by TorchScript, +(e.g. torch.jit.script), the following types are accepted;

    +
    +
      +
    • path-like: file path

    • +
    • file-like: Object with read(size: int) -> bytes method, +which returns byte string of at most size length.

    • +
    +
    +

    When the function is compiled by TorchScript, only str type is allowed.

    +

    Note: This argument is intentionally annotated as str only due to +TorchScript compiler compatibility.

    +

  • +
  • frame_offset (int) – Number of frames to skip before start reading data.

  • +
  • num_frames (int) – Maximum number of frames to read. -1 reads all the remaining samples, starting from frame_offset. This function may return fewer frames than requested if the file does not contain enough frames.

  • +
  • normalize (bool) – When True, this function always returns float32, and sample values are normalized to [-1.0, 1.0]. If the input file is an integer WAV, giving False changes the resulting Tensor to an integer type. This argument has no effect for formats other than integer WAV.

  • +
  • channels_first (bool) – When True, the returned Tensor has dimension [channel, time]. +Otherwise, the returned Tensor’s dimension is [time, channel].

  • +
  • format (str, optional) – Override the format detection with the given format. Providing the argument might help when libsox cannot infer the format from the header or extension.

  • +
+
+
Returns
+

+
Resulting Tensor and sample rate.

If the input file has integer wav format and normalization is off, then it has +integer type, else float32 type. If channels_first=True, it has +[channel, time] else [time, channel].

+
+
+

+
+
Return type
+

Tuple[torch.Tensor, int]

+
+
+
+ +
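A short sketch of the normalize and slicing behaviour described above (assuming the "sox_io" backend is active and foo.wav is a hypothetical 16-bit signed PCM file):

waveform, sample_rate = torchaudio.load("foo.wav")                 # float32 in [-1.0, 1.0], shape [channel, time]
raw, sample_rate = torchaudio.load("foo.wav", normalize=False)     # int16 Tensor for 16-bit signed PCM
chunk, sample_rate = torchaudio.load("foo.wav", frame_offset=8000, num_frames=8000)  # 8000-frame slice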
+
+

save

+
+
+torchaudio.backend.sox_io_backend.save(filepath: str, src: torch.Tensor, sample_rate: int, channels_first: bool = True, compression: Optional[float] = None, format: Optional[str] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None)[source]
+

Save audio data to file.

+
+
Parameters
+
    +
  • filepath (str or pathlib.Path) – Path to save file. +This function also handles pathlib.Path objects, but is annotated +as str for TorchScript compiler compatibility.

  • +
  • src (torch.Tensor) – Audio data to save. Must be a 2D tensor.

  • +
  • sample_rate (int) – sampling rate

  • +
  • channels_first (bool) – If True, the given tensor is interpreted as [channel, time], +otherwise [time, channel].

  • +
  • compression (Optional[float]) –

    Used for formats other than WAV. +This corresponds to -C option of sox command.

    +
    +
    "mp3"

    Either bitrate (in kbps) with quality factor, such as 128.2, or +VBR encoding with quality factor such as -4.2. Default: -4.5.

    +
    +
    "flac"

    Whole number from 0 to 8. 8 is default and highest compression.

    +
    +
    "ogg", "vorbis"

    Number from -1 to 10; -1 is the highest compression +and lowest quality. Default: 3.

    +
    +
    +

    See the detail at http://sox.sourceforge.net/soxformat.html.

    +

  • +
  • format (str, optional) –

Override the audio format. When the filepath argument is a path-like object, the audio format is inferred from the file extension. If the file extension is missing or different, you can specify the correct format with this argument.

    +

    When filepath argument is file-like object, this argument is required.

    +

    Valid values are "wav", "mp3", "ogg", "vorbis", "amr-nb", +"amb", "flac", "sph", "gsm", and "htk".

    +

  • +
  • encoding (str, optional) –

Changes the encoding for the supported formats. This argument is effective only for supported formats, such as "wav", "amb" and "sph". Valid values are:

    +
    +
      +
    • "PCM_S" (signed integer Linear PCM)

    • +
    • "PCM_U" (unsigned integer Linear PCM)

    • +
    • "PCM_F" (floating point PCM)

    • +
    • "ULAW" (mu-law)

    • +
    • "ALAW" (a-law)

    • +
    +
    +
    +
    Default values

    If not provided, the default value is picked based on format and bits_per_sample.

    +
    +
    "wav", "amb"
      +
    • +
If both encoding and bits_per_sample are not provided, the dtype of the Tensor is used to determine the default value:
  • "PCM_U" if dtype is uint8
  • "PCM_S" if dtype is int16 or int32
  • "PCM_F" if dtype is float32
      +
      +
    • +
    • "PCM_U" if bits_per_sample=8

    • +
    • "PCM_S" otherwise

    • +
    +
    +
    "sph" format;
      +
    • the default value is "PCM_S"

    • +
    +
    +
    +
    +
    +

  • +
  • bits_per_sample (int, optional) –

    Changes the bit depth for the supported formats. +When format is one of "wav", "flac", "sph", or "amb", you can change the +bit depth. Valid values are 8, 16, 32 and 64.

    +
    +
    Default Value;

If not provided, the default values are picked based on format and encoding:

    +
    +
    "wav", "amb";
      +
    • +
If both encoding and bits_per_sample are not provided, the dtype of the Tensor is used:
  • 8 if dtype is uint8
  • 16 if dtype is int16
  • 32 if dtype is int32 or float32
      +
      +
    • +
    • 8 if encoding is "PCM_U", "ULAW" or "ALAW"

    • +
    • 16 if encoding is "PCM_S"

    • +
    • 32 if encoding is "PCM_F"

    • +
    +
    +
    "flac" format;
      +
    • the default value is 24

    • +
    +
    +
    "sph" format;
      +
    • 16 if encoding is "PCM_U", "PCM_S", "PCM_F" or not provided.

    • +
    • 8 if encoding is "ULAW" or "ALAW"

    • +
    +
    +
    "amb" format;
      +
    • 8 if encoding is "PCM_U", "ULAW" or "ALAW"

    • +
    • 16 if encoding is "PCM_S" or not provided.

    • +
    • 32 if encoding is "PCM_F"

    • +
    +
    +
    +
    +
    +

  • +
+
+
+

Supported formats/encodings/bit depth/compression are;

+
+
"wav", "amb"
    +
  • 32-bit floating-point PCM

  • +
  • 32-bit signed integer PCM

  • +
  • 24-bit signed integer PCM

  • +
  • 16-bit signed integer PCM

  • +
  • 8-bit unsigned integer PCM

  • +
  • 8-bit mu-law

  • +
  • 8-bit a-law

  • +
+

Note: Default encoding/bit depth is determined by the dtype of the input Tensor.

+
+
"mp3"

Fixed bit rate (such as 128 kbps) and variable bit rate compression. Default: VBR with high quality.

+
+
"flac"
    +
  • 8-bit

  • +
  • 16-bit

  • +
  • 24-bit (default)

  • +
+
+
"ogg", "vorbis"
    +
  • Different quality level. Default: approx. 112kbps

  • +
+
+
"sph"
    +
  • 8-bit signed integer PCM

  • +
  • 16-bit signed integer PCM

  • +
  • 24-bit signed integer PCM

  • +
  • 32-bit signed integer PCM (default)

  • +
  • 8-bit mu-law

  • +
  • 8-bit a-law

  • +
  • 16-bit a-law

  • +
  • 24-bit a-law

  • +
  • 32-bit a-law

  • +
+
+
"amr-nb"

Bitrate ranging from 4.75 kbit/s to 12.2 kbit/s. Default: 4.75 kbit/s

+
+
"gsm"

Lossy Speech Compression, CPU intensive.

+
+
"htk"

Uses a default single-channel 16-bit PCM format.

+
+
+
+

Note

+

To save into formats that libsox does not handle natively (such as "mp3", "flac", "ogg" and "vorbis"), your installation of torchaudio has to be linked to libsox and the corresponding codec libraries, such as libmad or libmp3lame.

+
+
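A minimal sketch combining the encoding, bits_per_sample and compression options above (file names are hypothetical; mp3/flac output additionally requires the codec libraries mentioned in the note):

torchaudio.save("out.wav", waveform, sample_rate)                                        # encoding/bit depth from dtype
torchaudio.save("out.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)  # 16-bit signed PCM
torchaudio.save("out.flac", waveform, sample_rate, bits_per_sample=24)                   # 24-bit FLAC
torchaudio.save("out.mp3", waveform, sample_rate, compression=128.2)                     # bitrate in kbps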
+ +
+
+
+

Soundfile Backend

+

The "soundfile" backend is available when SoundFile is installed. This backend is the default on Windows.

+

You can switch from another backend to the "soundfile" backend with the following:

+
torchaudio.set_audio_backend("soundfile")
+
+
+
+

info

+
+
+torchaudio.backend.soundfile_backend.info(filepath: str, format: Optional[str] = None) → torchaudio.backend.common.AudioMetaData[source]
+

Get signal information of an audio file.

+
+

Note

+

filepath argument is intentionally annotated as str only, even though it accepts a pathlib.Path object as well. This is for consistency with the "sox_io" backend, which has a restriction on type annotation due to TorchScript compiler compatibility.

+
+
+
Parameters
+
    +
  • filepath (path-like object or file-like object) – Source of audio data.

  • +
  • format (str, optional) – Not used. PySoundFile does not accept format hint.

  • +
+
+
Returns
+

Metadata of the given audio.

+
+
Return type
+

AudioMetaData

+
+
+
+ +
+
+

load

+
+
+torchaudio.backend.soundfile_backend.load(filepath: str, frame_offset: int = 0, num_frames: int = -1, normalize: bool = True, channels_first: bool = True, format: Optional[str] = None) → Tuple[torch.Tensor, int][source]
+

Load audio data from file.

+
+

Note

+

The formats this function can handle depend on the soundfile installation. +This function is tested on the following formats;

+
    +
  • WAV

    +
    +
      +
    • 32-bit floating-point

    • +
    • 32-bit signed integer

    • +
    • 16-bit signed integer

    • +
    • 8-bit unsigned integer

    • +
    +
    +
  • +
  • FLAC

  • +
  • OGG/VORBIS

  • +
  • SPHERE

  • +
+
+

By default (normalize=True, channels_first=True), this function returns Tensor with +float32 dtype and the shape of [channel, time]. +The samples are normalized to fit in the range of [-1.0, 1.0].

+

When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit +signed integer and 8-bit unsigned integer (24-bit signed integer is not supported), +by providing normalize=False, this function can return integer Tensor, where the samples +are expressed within the whole range of the corresponding dtype, that is, int32 tensor +for 32-bit signed PCM, int16 for 16-bit signed PCM and uint8 for 8-bit unsigned PCM.

+

normalize parameter has no effect on 32-bit floating-point WAV and other formats, such as +flac and mp3. +For these formats, this function always returns float32 Tensor with values normalized to +[-1.0, 1.0].

+
+

Note

+

filepath argument is intentionally annotated as str only, even though it accepts a pathlib.Path object as well. This is for consistency with the "sox_io" backend, which has a restriction on type annotation due to TorchScript compiler compatibility.

+
+
+
Parameters
+
    +
  • filepath (path-like object or file-like object) – Source of audio data.

  • +
  • frame_offset (int) – Number of frames to skip before start reading data.

  • +
  • num_frames (int) – Maximum number of frames to read. -1 reads all the remaining samples, starting from frame_offset. This function may return fewer frames than requested if the file does not contain enough frames.

  • +
  • normalize (bool) – When True, this function always returns float32, and sample values are normalized to [-1.0, 1.0]. If the input file is an integer WAV, giving False changes the resulting Tensor to an integer type. This argument has no effect for formats other than integer WAV.

  • +
  • channels_first (bool) – When True, the returned Tensor has dimension [channel, time]. +Otherwise, the returned Tensor’s dimension is [time, channel].

  • +
  • format (str, optional) – Not used. PySoundFile does not accept format hint.

  • +
+
+
Returns
+

+
Resulting Tensor and sample rate.

If the input file has integer wav format and normalization is off, then it has +integer type, else float32 type. If channels_first=True, it has +[channel, time] else [time, channel].

+
+
+

+
+
Return type
+

Tuple[torch.Tensor, int]

+
+
+
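A brief sketch under the "soundfile" backend (the file name is hypothetical), showing a pathlib.Path input and the [time, channel] layout obtained with channels_first=False:

from pathlib import Path

waveform, sample_rate = torchaudio.load(Path("foo.wav"), channels_first=False)  # shape [time, channel]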
+ +
+
+

save

+
+
+torchaudio.backend.soundfile_backend.save(filepath: str, src: torch.Tensor, sample_rate: int, channels_first: bool = True, compression: Optional[float] = None, format: Optional[str] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None)[source]
+

Save audio data to file.

+
+

Note

+

The formats this function can handle depend on the soundfile installation. +This function is tested on the following formats;

+
    +
  • WAV

    +
    +
      +
    • 32-bit floating-point

    • +
    • 32-bit signed integer

    • +
    • 16-bit signed integer

    • +
    • 8-bit unsigned integer

    • +
    +
    +
  • +
  • FLAC

  • +
  • OGG/VORBIS

  • +
  • SPHERE

  • +
+
+
+

Note

+

filepath argument is intentionally annotated as str only, even though it accepts a pathlib.Path object as well. This is for consistency with the "sox_io" backend, which has a restriction on type annotation due to TorchScript compiler compatibility.

+
+
+
Parameters
+
    +
  • filepath (str or pathlib.Path) – Path to audio file.

  • +
  • src (torch.Tensor) – Audio data to save. Must be a 2D tensor.

  • +
  • sample_rate (int) – sampling rate

  • +
  • channels_first (bool) – If True, the given tensor is interpreted as [channel, time], +otherwise [time, channel].

  • +
  • compression (Optional[float]) – Not used. It is here only for interface compatibility with the “sox_io” backend.

  • +
  • format (str, optional) –

Override the audio format. When the filepath argument is a path-like object, the audio format is inferred from the file extension. If the file extension is missing or different, you can specify the correct format with this argument.

    +

    When filepath argument is file-like object, +this argument is required.

    +

    Valid values are "wav", "ogg", "vorbis", +"flac" and "sph".

    +

  • +
  • encoding (str, optional) –

Changes the encoding for the supported formats. This argument is effective only for supported formats, such as "wav", "flac" and "sph". Valid values are:

    +
    +
      +
    • "PCM_S" (signed integer Linear PCM)

    • +
    • "PCM_U" (unsigned integer Linear PCM)

    • +
    • "PCM_F" (floating point PCM)

    • +
    • "ULAW" (mu-law)

    • +
    • "ALAW" (a-law)

    • +
    +
    +

  • +
  • bits_per_sample (int, optional) – Changes the bit depth for the +supported formats. +When format is one of "wav", "flac" or "sph", +you can change the bit depth. +Valid values are 8, 16, 24, 32 and 64.

  • +
+
+
+

Supported formats/encodings/bit depth/compression are:

+
+
"wav"
    +
  • 32-bit floating-point PCM

  • +
  • 32-bit signed integer PCM

  • +
  • 24-bit signed integer PCM

  • +
  • 16-bit signed integer PCM

  • +
  • 8-bit unsigned integer PCM

  • +
  • 8-bit mu-law

  • +
  • 8-bit a-law

  • +
+
+
Note: Default encoding/bit depth is determined by the dtype of the input Tensor.

+
+
+
+
"flac"
    +
  • 8-bit

  • +
  • 16-bit

  • +
  • 24-bit (default)

  • +
+
+
"ogg", "vorbis"
    +
  • Doesn’t accept changing configuration.

  • +
+
+
"sph"
    +
  • 8-bit signed integer PCM

  • +
  • 16-bit signed integer PCM

  • +
  • 24-bit signed integer PCM

  • +
  • 32-bit signed integer PCM (default)

  • +
  • 8-bit mu-law

  • +
  • 8-bit a-law

  • +
  • 16-bit a-law

  • +
  • 24-bit a-law

  • +
  • 32-bit a-law

  • +
+
+
+
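A minimal sketch of saving with this backend (file names are hypothetical), assuming torchaudio.set_audio_backend("soundfile") has been called:

torchaudio.save("out.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)  # 16-bit signed PCM WAV
torchaudio.save("out.flac", waveform, sample_rate)                                       # FLAC defaults to 24-bit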
+ +
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/compliance.kaldi.html b/0.9.0/compliance.kaldi.html new file mode 100644 index 0000000000..7946b586d9 --- /dev/null +++ b/0.9.0/compliance.kaldi.html @@ -0,0 +1,858 @@ + + + + + + + + + + + + + torchaudio.compliance.kaldi — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.compliance.kaldi

+

The useful processing operations of Kaldi can be performed with torchaudio. Various functions with identical parameters are provided so that torchaudio can produce similar outputs.

+
+

Functions

+
+

spectrogram

+
+
+torchaudio.compliance.kaldi.spectrogram(waveform: torch.Tensor, blackman_coeff: float = 0.42, channel: int = -1, dither: float = 0.0, energy_floor: float = 1.0, frame_length: float = 25.0, frame_shift: float = 10.0, min_duration: float = 0.0, preemphasis_coefficient: float = 0.97, raw_energy: bool = True, remove_dc_offset: bool = True, round_to_power_of_two: bool = True, sample_frequency: float = 16000.0, snip_edges: bool = True, subtract_mean: bool = False, window_type: str = 'povey') → torch.Tensor[source]
+

Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi’s +compute-spectrogram-feats.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of size (c, n) where c is in the range [0,2)

  • +
  • blackman_coeff (float, optional) – Constant coefficient for generalized Blackman window. (Default: 0.42)

  • +
  • channel (int, optional) – Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: -1)

  • +
  • dither (float, optional) – Dithering constant (0.0 means no dither). If you turn this off, you should set +the energy_floor option, e.g. to 1.0 or 0.1 (Default: 0.0)

  • +
  • energy_floor (float, optional) – Floor on energy (absolute, not relative) in Spectrogram computation. Caution: +this floor is applied to the zeroth component, representing the total signal energy. The floor on the +individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: 1.0)

  • +
  • frame_length (float, optional) – Frame length in milliseconds (Default: 25.0)

  • +
  • frame_shift (float, optional) – Frame shift in milliseconds (Default: 10.0)

  • +
  • min_duration (float, optional) – Minimum duration of segments to process (in seconds). (Default: 0.0)

  • +
  • preemphasis_coefficient (float, optional) – Coefficient for use in signal preemphasis (Default: 0.97)

  • +
  • raw_energy (bool, optional) – If True, compute energy before preemphasis and windowing (Default: True)

  • +
  • remove_dc_offset (bool, optional) – Subtract mean from waveform on each frame (Default: True)

  • +
  • round_to_power_of_two (bool, optional) – If True, round window size to power of two by zero-padding input +to FFT. (Default: True)

  • +
  • sample_frequency (float, optional) – Waveform data sample frequency (must match the waveform file, if +specified there) (Default: 16000.0)

  • +
  • snip_edges (bool, optional) – If True, end effects will be handled by outputting only frames that completely fit +in the file, and the number of frames depends on the frame_length. If False, the number of frames +depends only on the frame_shift, and we reflect the data at the ends. (Default: True)

  • +
  • subtract_mean (bool, optional) – Subtract mean of each feature file [CMS]; not recommended to do +it this way. (Default: False)

  • +
  • window_type (str, optional) – Type of window (‘hamming’|’hanning’|’povey’|’rectangular’|’blackman’) +(Default: 'povey')

  • +
+
+
Returns
+

A spectrogram identical to what Kaldi would output. The shape is +(m, padded_window_size // 2 + 1) where m is calculated in _get_strided

+
+
Return type
+

Tensor

+
+
+
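A minimal sketch of computing Kaldi-compatible spectrogram features from a loaded waveform (the file name is hypothetical; the waveform shape must be (c, n) as described above):

import torchaudio
import torchaudio.compliance.kaldi as kaldi

waveform, sample_rate = torchaudio.load("foo.wav")                     # shape (c, n), c is 1 or 2
specgram = kaldi.spectrogram(waveform, sample_frequency=sample_rate)   # shape (m, padded_window_size // 2 + 1)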
+ +
+
+

fbank

+
+
+torchaudio.compliance.kaldi.fbank(waveform: torch.Tensor, blackman_coeff: float = 0.42, channel: int = -1, dither: float = 0.0, energy_floor: float = 1.0, frame_length: float = 25.0, frame_shift: float = 10.0, high_freq: float = 0.0, htk_compat: bool = False, low_freq: float = 20.0, min_duration: float = 0.0, num_mel_bins: int = 23, preemphasis_coefficient: float = 0.97, raw_energy: bool = True, remove_dc_offset: bool = True, round_to_power_of_two: bool = True, sample_frequency: float = 16000.0, snip_edges: bool = True, subtract_mean: bool = False, use_energy: bool = False, use_log_fbank: bool = True, use_power: bool = True, vtln_high: float = -500.0, vtln_low: float = 100.0, vtln_warp: float = 1.0, window_type: str = 'povey') → torch.Tensor[source]
+

Create a fbank from a raw audio signal. This matches the input/output of Kaldi’s +compute-fbank-feats.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of size (c, n) where c is in the range [0,2)

  • +
  • blackman_coeff (float, optional) – Constant coefficient for generalized Blackman window. (Default: 0.42)

  • +
  • channel (int, optional) – Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: -1)

  • +
  • dither (float, optional) – Dithering constant (0.0 means no dither). If you turn this off, you should set +the energy_floor option, e.g. to 1.0 or 0.1 (Default: 0.0)

  • +
  • energy_floor (float, optional) – Floor on energy (absolute, not relative) in Spectrogram computation. Caution: +this floor is applied to the zeroth component, representing the total signal energy. The floor on the +individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: 1.0)

  • +
  • frame_length (float, optional) – Frame length in milliseconds (Default: 25.0)

  • +
  • frame_shift (float, optional) – Frame shift in milliseconds (Default: 10.0)

  • +
  • high_freq (float, optional) – High cutoff frequency for mel bins (if <= 0, offset from Nyquist) +(Default: 0.0)

  • +
  • htk_compat (bool, optional) – If true, put energy last. Warning: not sufficient to get HTK compatible features +(need to change other parameters). (Default: False)

  • +
  • low_freq (float, optional) – Low cutoff frequency for mel bins (Default: 20.0)

  • +
  • min_duration (float, optional) – Minimum duration of segments to process (in seconds). (Default: 0.0)

  • +
  • num_mel_bins (int, optional) – Number of triangular mel-frequency bins (Default: 23)

  • +
  • preemphasis_coefficient (float, optional) – Coefficient for use in signal preemphasis (Default: 0.97)

  • +
  • raw_energy (bool, optional) – If True, compute energy before preemphasis and windowing (Default: True)

  • +
  • remove_dc_offset (bool, optional) – Subtract mean from waveform on each frame (Default: True)

  • +
  • round_to_power_of_two (bool, optional) – If True, round window size to power of two by zero-padding input +to FFT. (Default: True)

  • +
  • sample_frequency (float, optional) – Waveform data sample frequency (must match the waveform file, if +specified there) (Default: 16000.0)

  • +
  • snip_edges (bool, optional) – If True, end effects will be handled by outputting only frames that completely fit +in the file, and the number of frames depends on the frame_length. If False, the number of frames +depends only on the frame_shift, and we reflect the data at the ends. (Default: True)

  • +
  • subtract_mean (bool, optional) – Subtract mean of each feature file [CMS]; not recommended to do +it this way. (Default: False)

  • +
  • use_energy (bool, optional) – Add an extra dimension with energy to the FBANK output. (Default: False)

  • +
  • use_log_fbank (bool, optional) – If true, produce log-filterbank, else produce linear. (Default: True)

  • +
  • use_power (bool, optional) – If true, use power, else use magnitude. (Default: True)

  • +
  • vtln_high (float, optional) – High inflection point in piecewise linear VTLN warping function (if negative, offset from high-mel-freq) (Default: -500.0)

  • +
  • vtln_low (float, optional) – Low inflection point in piecewise linear VTLN warping function (Default: 100.0)

  • +
  • vtln_warp (float, optional) – Vtln warp factor (only applicable if vtln_map not specified) (Default: 1.0)

  • +
  • window_type (str, optional) – Type of window (‘hamming’|’hanning’|’povey’|’rectangular’|’blackman’) +(Default: 'povey')

  • +
+
+
Returns
+

A fbank identical to what Kaldi would output. The shape is (m, num_mel_bins + use_energy) +where m is calculated in _get_strided

+
+
Return type
+

Tensor

+
+
+
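A short sketch (hypothetical file name, reusing the kaldi alias from the previous sketch) producing 40-dimensional log mel filterbank features:

waveform, sample_rate = torchaudio.load("foo.wav")
fbank_feats = kaldi.fbank(waveform, num_mel_bins=40, sample_frequency=sample_rate)  # shape (m, 40)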
+ +
+
+

mfcc

+
+
+torchaudio.compliance.kaldi.mfcc(waveform: torch.Tensor, blackman_coeff: float = 0.42, cepstral_lifter: float = 22.0, channel: int = -1, dither: float = 0.0, energy_floor: float = 1.0, frame_length: float = 25.0, frame_shift: float = 10.0, high_freq: float = 0.0, htk_compat: bool = False, low_freq: float = 20.0, num_ceps: int = 13, min_duration: float = 0.0, num_mel_bins: int = 23, preemphasis_coefficient: float = 0.97, raw_energy: bool = True, remove_dc_offset: bool = True, round_to_power_of_two: bool = True, sample_frequency: float = 16000.0, snip_edges: bool = True, subtract_mean: bool = False, use_energy: bool = False, vtln_high: float = -500.0, vtln_low: float = 100.0, vtln_warp: float = 1.0, window_type: str = 'povey') → torch.Tensor[source]
+

Create a mfcc from a raw audio signal. This matches the input/output of Kaldi’s +compute-mfcc-feats.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of size (c, n) where c is in the range [0,2)

  • +
  • blackman_coeff (float, optional) – Constant coefficient for generalized Blackman window. (Default: 0.42)

  • +
  • cepstral_lifter (float, optional) – Constant that controls scaling of MFCCs (Default: 22.0)

  • +
  • channel (int, optional) – Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: -1)

  • +
  • dither (float, optional) – Dithering constant (0.0 means no dither). If you turn this off, you should set +the energy_floor option, e.g. to 1.0 or 0.1 (Default: 0.0)

  • +
  • energy_floor (float, optional) – Floor on energy (absolute, not relative) in Spectrogram computation. Caution: +this floor is applied to the zeroth component, representing the total signal energy. The floor on the +individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: 1.0)

  • +
  • frame_length (float, optional) – Frame length in milliseconds (Default: 25.0)

  • +
  • frame_shift (float, optional) – Frame shift in milliseconds (Default: 10.0)

  • +
  • high_freq (float, optional) – High cutoff frequency for mel bins (if <= 0, offset from Nyquist) +(Default: 0.0)

  • +
  • htk_compat (bool, optional) – If true, put energy last. Warning: not sufficient to get HTK compatible +features (need to change other parameters). (Default: False)

  • +
  • low_freq (float, optional) – Low cutoff frequency for mel bins (Default: 20.0)

  • +
  • num_ceps (int, optional) – Number of cepstra in MFCC computation (including C0) (Default: 13)

  • +
  • min_duration (float, optional) – Minimum duration of segments to process (in seconds). (Default: 0.0)

  • +
  • num_mel_bins (int, optional) – Number of triangular mel-frequency bins (Default: 23)

  • +
  • preemphasis_coefficient (float, optional) – Coefficient for use in signal preemphasis (Default: 0.97)

  • +
  • raw_energy (bool, optional) – If True, compute energy before preemphasis and windowing (Default: True)

  • +
  • remove_dc_offset (bool, optional) – Subtract mean from waveform on each frame (Default: True)

  • +
  • round_to_power_of_two (bool, optional) – If True, round window size to power of two by zero-padding input +to FFT. (Default: True)

  • +
  • sample_frequency (float, optional) – Waveform data sample frequency (must match the waveform file, if +specified there) (Default: 16000.0)

  • +
  • snip_edges (bool, optional) – If True, end effects will be handled by outputting only frames that completely fit +in the file, and the number of frames depends on the frame_length. If False, the number of frames +depends only on the frame_shift, and we reflect the data at the ends. (Default: True)

  • +
  • subtract_mean (bool, optional) – Subtract mean of each feature file [CMS]; not recommended to do +it this way. (Default: False)

  • +
  • use_energy (bool, optional) – Add an extra dimension with energy to the FBANK output. (Default: False)

  • +
  • vtln_high (float, optional) – High inflection point in piecewise linear VTLN warping function (if negative, offset from high-mel-freq) (Default: -500.0)

  • +
  • vtln_low (float, optional) – Low inflection point in piecewise linear VTLN warping function (Default: 100.0)

  • +
  • vtln_warp (float, optional) – Vtln warp factor (only applicable if vtln_map not specified) (Default: 1.0)

  • +
  • window_type (str, optional) – Type of window (‘hamming’|’hanning’|’povey’|’rectangular’|’blackman’) +(Default: "povey")

  • +
+
+
Returns
+

A mfcc identical to what Kaldi would output. The shape is (m, num_ceps) +where m is calculated in _get_strided

+
+
Return type
+

Tensor

+
+
+
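A short sketch (hypothetical file name, kaldi alias as above) producing 13 cepstral coefficients per frame:

waveform, sample_rate = torchaudio.load("foo.wav")
mfcc_feats = kaldi.mfcc(waveform, num_ceps=13, sample_frequency=sample_rate)  # shape (m, 13)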
+ +
+
+

resample_waveform

+
+
+torchaudio.compliance.kaldi.resample_waveform(waveform: torch.Tensor, orig_freq: float, new_freq: float, lowpass_filter_width: int = 6, rolloff: float = 0.99, resampling_method: str = 'sinc_interpolation') → torch.Tensor[source]
+

Resamples the waveform at the new frequency.

+

This is a wrapper around torchaudio.functional.resample.

+
+
Parameters
+
    +
  • waveform (Tensor) – The input signal of size (…, time)

  • +
  • orig_freq (float) – The original frequency of the signal

  • +
  • new_freq (float) – The desired frequency

  • +
  • lowpass_filter_width (int, optional) – Controls the sharpness of the filter, more == sharper +but less efficient. We suggest around 4 to 10 for normal use. (Default: 6)

  • +
  • rolloff (float, optional) – The roll-off frequency of the filter, as a fraction of the Nyquist. +Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: 0.99)

  • +
  • resampling_method (str, optional) – The resampling method to use. +Options: [sinc_interpolation, kaiser_window] (Default: 'sinc_interpolation')

  • +
+
+
Returns
+

The waveform at the new frequency

+
+
Return type
+

Tensor

+
+
+
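A minimal sketch (kaldi alias as above) downsampling a 16 kHz waveform to 8 kHz:

resampled = kaldi.resample_waveform(waveform, orig_freq=16000.0, new_freq=8000.0)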
+ +
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/datasets.html b/0.9.0/datasets.html new file mode 100644 index 0000000000..03f77e5f97 --- /dev/null +++ b/0.9.0/datasets.html @@ -0,0 +1,1155 @@ + + + + + + + + + + + + + torchaudio.datasets — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.datasets

+

All datasets are subclasses of torch.utils.data.Dataset and have __getitem__ and __len__ methods implemented. Hence, they can all be passed to a torch.utils.data.DataLoader, which can load multiple samples in parallel using torch.multiprocessing workers. For example:

+
yesno_data = torchaudio.datasets.YESNO('.', download=True)
+data_loader = torch.utils.data.DataLoader(yesno_data,
+                                          batch_size=1,
+                                          shuffle=True,
+                                          num_workers=args.nThreads)
+
+
+

The following datasets are available:

+ +

All the datasets have a nearly identical API. They all have two common arguments: transform and target_transform to transform the input and target respectively.

+
+

CMUARCTIC

+
+
+class torchaudio.datasets.CMUARCTIC(root: Union[str, pathlib.Path], url: str = 'aew', folder_in_archive: str = 'ARCTIC', download: bool = False)[source]
+

Create a Dataset for CMU_ARCTIC.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – The URL to download the dataset from, or the type of the dataset to download. (default: "aew") Allowed type values are "aew", "ahw", "aup", "awb", "axb", "bdl", "clb", "eey", "fem", "gka", "jmk", "ksp", "ljm", "lnh", "rms", "rxr", "slp" or "slt".

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset. (default: "ARCTIC")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, str][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, utterance, utterance_id)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

COMMONVOICE

+
+
+class torchaudio.datasets.COMMONVOICE(root: Union[str, pathlib.Path], tsv: str = 'train.tsv')[source]
+

Create a Dataset for CommonVoice.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is located. +(Where the tsv file is present.)

  • +
  • tsv (str, optional) – The name of the tsv file used to construct the metadata, such as +"train.tsv", "test.tsv", "dev.tsv", "invalidated.tsv", +"validated.tsv" and "other.tsv". (default: "train.tsv")

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, Dict[str, str]][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, dictionary), where dictionary is built +from the TSV file with the following keys: client_id, path, sentence, +up_votes, down_votes, age, gender and accent.

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

GTZAN

+
+
+class torchaudio.datasets.GTZAN(root: Union[str, pathlib.Path], url: str = 'http://opihi.cs.uvic.ca/sound/genres.tar.gz', folder_in_archive: str = 'genres', download: bool = False, subset: Optional[str] = None)[source]
+

Create a Dataset for GTZAN.

+
+

Note

+

Please see http://marsyas.info/downloads/datasets.html if you are planning to use +this dataset to publish results.

+
+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – The URL to download the dataset from. +(default: "http://opihi.cs.uvic.ca/sound/genres.tar.gz")

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset.

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
  • subset (str, optional) – Which subset of the dataset to use. +One of "training", "validation", "testing" or None. +If None, the entire dataset is used. (default: None).

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, label)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

LIBRISPEECH

+
+
+class torchaudio.datasets.LIBRISPEECH(root: Union[str, pathlib.Path], url: str = 'train-clean-100', folder_in_archive: str = 'LibriSpeech', download: bool = False)[source]
+

Create a Dataset for LibriSpeech.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – The URL to download the dataset from, or the type of the dataset to download. Allowed type values are "dev-clean", "dev-other", "test-clean", "test-other", "train-clean-100", "train-clean-360" and "train-other-500". (default: "train-clean-100")

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset. (default: "LibriSpeech")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, int, int, int][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, utterance, speaker_id, chapter_id, utterance_id)

+
+
Return type
+

tuple

+
+
+
+ +
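For instance, a single sample can be unpacked as follows (the root directory ./data is hypothetical; download=True fetches the archive if it is not already present):

dataset = torchaudio.datasets.LIBRISPEECH("./data", url="train-clean-100", download=True)
waveform, sample_rate, utterance, speaker_id, chapter_id, utterance_id = dataset[0]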
+ +
+
+

LIBRITTS

+
+
+class torchaudio.datasets.LIBRITTS(root: Union[str, pathlib.Path], url: str = 'train-clean-100', folder_in_archive: str = 'LibriTTS', download: bool = False)[source]
+

Create a Dataset for LibriTTS.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – The URL to download the dataset from, or the type of the dataset to download. Allowed type values are "dev-clean", "dev-other", "test-clean", "test-other", "train-clean-100", "train-clean-360" and "train-other-500". (default: "train-clean-100")

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset. (default: "LibriTTS")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, str, int, int, str][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, original_text, normalized_text, speaker_id, +chapter_id, utterance_id)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

LJSPEECH

+
+
+class torchaudio.datasets.LJSPEECH(root: Union[str, pathlib.Path], url: str = 'https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2', folder_in_archive: str = 'wavs', download: bool = False)[source]
+

Create a Dataset for LJSpeech-1.1.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – The URL to download the dataset from. +(default: "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2")

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset. (default: "wavs")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, str][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, transcript, normalized_transcript)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

SPEECHCOMMANDS

+
+
+class torchaudio.datasets.SPEECHCOMMANDS(root: Union[str, pathlib.Path], url: str = 'speech_commands_v0.02', folder_in_archive: str = 'SpeechCommands', download: bool = False, subset: Optional[str] = None)[source]
+

Create a Dataset for Speech Commands.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – The URL to download the dataset from, or the type of the dataset to download. Allowed type values are "speech_commands_v0.01" and "speech_commands_v0.02" (default: "speech_commands_v0.02")

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset. (default: "SpeechCommands")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
  • subset (Optional[str]) – Select a subset of the dataset [None, “training”, “validation”, “testing”]. None means +the whole dataset. “validation” and “testing” are defined in “validation_list.txt” and +“testing_list.txt”, respectively, and “training” is the rest. Details for the files +“validation_list.txt” and “testing_list.txt” are explained in the README of the dataset +and in the introduction of Section 7 of the original paper and its reference 12. The +original paper can be found here. (Default: None)

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, str, int][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, label, speaker_id, utterance_number)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+
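
A hedged sketch of building the standard training/validation/testing splits via the subset argument (the "_assets" root directory is a placeholder):

>>> import torchaudio
>>> train_set = torchaudio.datasets.SPEECHCOMMANDS("_assets", download=True, subset="training")
>>> valid_set = torchaudio.datasets.SPEECHCOMMANDS("_assets", download=False, subset="validation")
>>> test_set = torchaudio.datasets.SPEECHCOMMANDS("_assets", download=False, subset="testing")
>>> waveform, sample_rate, label, speaker_id, utterance_number = train_set[0]
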

TEDLIUM

+
+
+class torchaudio.datasets.TEDLIUM(root: Union[str, pathlib.Path], release: str = 'release1', subset: str = None, download: bool = False, audio_ext='.sph')[source]
+

Create a Dataset for Tedlium. It supports releases 1, 2 and 3.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • release (str, optional) – Release version. +Allowed values are "release1", "release2" or "release3". +(default: "release1").

  • +
  • subset (str, optional) – The subset of the dataset to use. Valid options are "train", "dev", and "test" for releases 1&2, and None for release 3. (default: "train" for releases 1&2, None for release 3)

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, int, int, int][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)

+
+
Return type
+

tuple

+
+
+
+ +
+
+property phoneme_dict
+

Phonemes. Mapping from word to tuple of phonemes. +Note that some words have empty phonemes.

+
+
Type
+

dict[str, tuple[str]]

+
+
+
+ +
+ +
+
+

VCTK

+
+
+class torchaudio.datasets.VCTK(root: Union[str, pathlib.Path], url: str = 'https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip', folder_in_archive: str = 'VCTK-Corpus', download: bool = False, downsample: bool = False)[source]
+

Create a Dataset for VCTK.

+
+

Note

This dataset is no longer publicly available. Please use VCTK_092 instead.
+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – Not used as the dataset is no longer publicly available.

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset. (default: "VCTK-Corpus")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False). +Giving download=True will result in error as the dataset is no longer +publicly available.

  • +
  • downsample (bool, optional) – Not used.

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, str, str][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, utterance, speaker_id, utterance_id)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

VCTK_092

+
+
+class torchaudio.datasets.VCTK_092(root: str, mic_id: str = 'mic2', download: bool = False, url: str = 'https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip', audio_ext='.flac')[source]
+

Create VCTK 0.92 Dataset

+
+
Parameters
+
    +
  • root (str) – Root directory where the dataset’s top level directory is found.

  • +
  • mic_id (str) – Microphone ID. Either "mic1" or "mic2". (default: "mic2")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
  • url (str, optional) – The URL to download the dataset from. +(default: "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip")

  • +
  • audio_ext (str, optional) – Custom audio extension if dataset is converted to non-default audio format.

  • +
+
+
+
+

Note

+
    +
  • All the speeches from speaker p315 will be skipped due to the lack of the corresponding text files.

  • +
  • All the speeches from p280 will be skipped for mic_id="mic2" due to the lack of the audio files.

  • +
  • Some of the speeches from speaker p362 will be skipped due to the lack of the audio files.

  • +
  • See Also: https://datashare.is.ed.ac.uk/handle/10283/3443

  • +
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, str, str, str][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, utterance, speaker_id, utterance_id)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

YESNO

+
+
+class torchaudio.datasets.YESNO(root: Union[str, pathlib.Path], url: str = 'http://www.openslr.org/resources/1/waves_yesno.tar.gz', folder_in_archive: str = 'waves_yesno', download: bool = False)[source]
+

Create a Dataset for YesNo.

+
+
Parameters
+
    +
  • root (str or Path) – Path to the directory where the dataset is found or downloaded.

  • +
  • url (str, optional) – The URL to download the dataset from. +(default: "http://www.openslr.org/resources/1/waves_yesno.tar.gz")

  • +
  • folder_in_archive (str, optional) – The top-level directory of the dataset. (default: "waves_yesno")

  • +
  • download (bool, optional) – Whether to download the dataset if it is not found at root path. (default: False).

  • +
+
+
+
+
+__getitem__(n: int) → Tuple[torch.Tensor, int, List[int]][source]
+

Load the n-th sample from the dataset.

+
+
Parameters
+

n (int) – The index of the sample to be loaded

+
+
Returns
+

(waveform, sample_rate, labels)

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/functional.html b/0.9.0/functional.html new file mode 100644 index 0000000000..d97a08baed --- /dev/null +++ b/0.9.0/functional.html @@ -0,0 +1,2106 @@ + + + + + + + + + + + + + torchaudio.functional — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.functional

+

Functions to perform common audio operations.

+
+

Utility

+
+

amplitude_to_DB

+
+
+torchaudio.functional.amplitude_to_DB(x: torch.Tensor, multiplier: float, amin: float, db_multiplier: float, top_db: Optional[float] = None) → torch.Tensor[source]
+

Turn a spectrogram from the power/amplitude scale to the decibel scale.

+

The output of each tensor in a batch depends on the maximum value of that tensor, +and so may return different values for an audio clip split into snippets vs. a full clip.

+
+
Parameters
+
    +
  • x (Tensor) – Input spectrogram(s) before being converted to decibel scale. Input should take +the form (…, freq, time). Batched inputs should include a channel dimension and +have the form (batch, channel, freq, time).

  • +
  • multiplier (float) – Use 10. for power and 20. for amplitude

  • +
  • amin (float) – Number to clamp x

  • +
  • db_multiplier (float) – Log10(max(reference value and amin))

  • +
  • top_db (float or None, optional) – Minimum negative cut-off in decibels. A reasonable number +is 80. (Default: None)

  • +
+
+
Returns
+

Output tensor in decibel scale

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

DB_to_amplitude

+
+
+torchaudio.functional.DB_to_amplitude(x: torch.Tensor, ref: float, power: float) → torch.Tensor[source]
+

Turn a tensor from the decibel scale to the power/amplitude scale.

+
+
Parameters
+
    +
  • x (Tensor) – Input tensor before being converted to power/amplitude scale.

  • +
  • ref (float) – Reference which the output will be scaled by.

  • +
  • power (float) – If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.

  • +
+
+
Returns
+

Output tensor in power/amplitude scale.

+
+
Return type
+

Tensor

+
+
+
+ +
+
+
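
An illustrative round-trip sketch converting a power spectrogram to decibels and back (the input is random data; db_multiplier=0.0 corresponds to a reference value of 1.0, and all values are assumptions for the example):

>>> import torch
>>> import torchaudio.functional as F
>>> power_spec = torch.rand(1, 201, 100)                     # (channel, freq, time), power scale
>>> spec_db = F.amplitude_to_DB(power_spec, multiplier=10.0, amin=1e-10,
...                             db_multiplier=0.0, top_db=80.0)
>>> recovered = F.DB_to_amplitude(spec_db, ref=1.0, power=1.0)   # back to the power scale
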

create_fb_matrix

+
+
+torchaudio.functional.create_fb_matrix(n_freqs: int, f_min: float, f_max: float, n_mels: int, sample_rate: int, norm: Optional[str] = None, mel_scale: str = 'htk') → torch.Tensor[source]
+

Create a frequency bin conversion matrix.

+
+
Parameters
+
    +
  • n_freqs (int) – Number of frequencies to highlight/apply

  • +
  • f_min (float) – Minimum frequency (Hz)

  • +
  • f_max (float) – Maximum frequency (Hz)

  • +
  • n_mels (int) – Number of mel filterbanks

  • +
  • sample_rate (int) – Sample rate of the audio waveform

  • +
  • norm (Optional[str]) – If 'slaney', divide the triangular mel weights by the width of the mel band (area normalization). (Default: None)

  • +
  • mel_scale (str, optional) – Scale to use: htk or slaney. (Default: htk)

  • +
+
+
Returns
+

Triangular filter banks (fb matrix) of size (n_freqs, n_mels) +meaning number of frequencies to highlight/apply to x the number of filterbanks. +Each column is a filterbank so that assuming there is a matrix A of +size (…, n_freqs), the applied result would be +A * create_fb_matrix(A.size(-1), ...).

+
+
Return type
+

Tensor

+
+
+
+ +
+
+
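
An illustrative sketch applying the filter bank to a linear-frequency spectrogram, following the A * create_fb_matrix(A.size(-1), ...) convention described above (shapes and frequency values are arbitrary):

>>> import torch
>>> import torchaudio.functional as F
>>> spec = torch.rand(1, 201, 100)                           # (channel, n_freqs, time)
>>> fb = F.create_fb_matrix(n_freqs=201, f_min=0.0, f_max=8000.0,
...                         n_mels=64, sample_rate=16000)
>>> fb.shape
torch.Size([201, 64])
>>> mel_spec = torch.matmul(spec.transpose(-1, -2), fb).transpose(-1, -2)
>>> mel_spec.shape
torch.Size([1, 64, 100])
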

create_dct

+
+
+torchaudio.functional.create_dct(n_mfcc: int, n_mels: int, norm: Optional[str]) → torch.Tensor[source]
+

Create a DCT transformation matrix with shape (n_mels, n_mfcc), +normalized depending on norm.

+
+
Parameters
+
    +
  • n_mfcc (int) – Number of mfc coefficients to retain

  • +
  • n_mels (int) – Number of mel filterbanks

  • +
  • norm (str or None) – Norm to use (either ‘ortho’ or None)

  • +
+
+
Returns
+

The transformation matrix, to be right-multiplied to +row-wise data of size (n_mels, n_mfcc).

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

mask_along_axis

+
+
+torchaudio.functional.mask_along_axis(specgram: torch.Tensor, mask_param: int, mask_value: float, axis: int) → torch.Tensor[source]
+

Apply a mask along axis. Mask will be applied from indices [v_0, v_0 + v), where +v is sampled from uniform(0, mask_param), and v_0 from uniform(0, max_v - v). +All examples will have the same mask interval.

+
+
Parameters
+
    +
  • specgram (Tensor) – Real spectrogram (channel, freq, time)

  • +
  • mask_param (int) – Number of columns to be masked will be uniformly sampled from [0, mask_param]

  • +
  • mask_value (float) – Value to assign to the masked columns

  • +
  • axis (int) – Axis to apply masking on (1 -> frequency, 2 -> time)

  • +
+
+
Returns
+

Masked spectrogram of dimensions (channel, freq, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
+
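
A hedged sketch of SpecAugment-style masking of one spectrogram along frequency and then time (shapes and mask_param values are arbitrary):

>>> import torch
>>> import torchaudio.functional as F
>>> specgram = torch.rand(1, 201, 400)                       # (channel, freq, time)
>>> masked = F.mask_along_axis(specgram, mask_param=27, mask_value=0.0, axis=1)   # frequency mask
>>> masked = F.mask_along_axis(masked, mask_param=100, mask_value=0.0, axis=2)    # time mask
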

mask_along_axis_iid

+
+
+torchaudio.functional.mask_along_axis_iid(specgrams: torch.Tensor, mask_param: int, mask_value: float, axis: int) → torch.Tensor[source]
+

Apply a mask along axis. Mask will be applied from indices [v_0, v_0 + v), where +v is sampled from uniform(0, mask_param), and v_0 from uniform(0, max_v - v).

+
+
Parameters
+
    +
  • specgrams (Tensor) – Real spectrograms (batch, channel, freq, time)

  • +
  • mask_param (int) – Number of columns to be masked will be uniformly sampled from [0, mask_param]

  • +
  • mask_value (float) – Value to assign to the masked columns

  • +
  • axis (int) – Axis to apply masking on (2 -> frequency, 3 -> time)

  • +
+
+
Returns
+

Masked spectrograms of dimensions (batch, channel, freq, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

mu_law_encoding

+
+
+torchaudio.functional.mu_law_encoding(x: torch.Tensor, quantization_channels: int) → torch.Tensor[source]
+

Encode signal based on mu-law companding. For more info see the +Wikipedia Entry

+

This algorithm assumes the signal has been scaled to between -1 and 1 and +returns a signal encoded with values from 0 to quantization_channels - 1.

+
+
Parameters
+
    +
  • x (Tensor) – Input tensor

  • +
  • quantization_channels (int) – Number of channels

  • +
+
+
Returns
+

Input after mu-law encoding

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

mu_law_decoding

+
+
+torchaudio.functional.mu_law_decoding(x_mu: torch.Tensor, quantization_channels: int) → torch.Tensor[source]
+

Decode mu-law encoded signal. For more info see the +Wikipedia Entry

+

This expects an input with values between 0 and quantization_channels - 1 +and returns a signal scaled between -1 and 1.

+
+
Parameters
+
    +
  • x_mu (Tensor) – Input tensor

  • +
  • quantization_channels (int) – Number of channels

  • +
+
+
Returns
+

Input after mu-law decoding

+
+
Return type
+

Tensor

+
+
+
+ +
+
+
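
A round-trip sketch; the companding is lossy, so the decoded signal only approximates the input (the waveform here is a synthetic sine, already scaled to [-1, 1]):

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.sin(torch.linspace(0, 100, 16000))
>>> encoded = F.mu_law_encoding(waveform, quantization_channels=256)   # integer values in [0, 255]
>>> decoded = F.mu_law_decoding(encoded, quantization_channels=256)    # roughly back to [-1, 1]
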

apply_codec

+
+
+torchaudio.functional.apply_codec(waveform: torch.Tensor, sample_rate: int, format: str, channels_first: bool = True, compression: Optional[float] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None) → torch.Tensor[source]
+

Apply codecs as a form of augmentation.

+
+
Parameters
+
    +
  • waveform (Tensor) – Audio data. Must be 2 dimensional. See also `channels_first`.

  • +
  • sample_rate (int) – Sample rate of the audio waveform.

  • +
  • format (str) – File format.

  • +
  • channels_first (bool) – When True, both the input and output Tensor have dimension [channel, time]. +Otherwise, they have dimension [time, channel].

  • +
  • compression (float) – Used for formats other than WAV. +For more details see torchaudio.backend.sox_io_backend.save().

  • +
  • encoding (str, optional) – Changes the encoding for the supported formats. +For more details see torchaudio.backend.sox_io_backend.save().

  • +
  • bits_per_sample (int, optional) – Changes the bit depth for the supported formats. +For more details see torchaudio.backend.sox_io_backend.save().

  • +
+
+
Returns
+

Resulting Tensor. +If channels_first=True, it has [channel, time] else [time, channel].

+
+
Return type
+

torch.Tensor

+
+
+
+ +
+
+
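
An illustrative sketch of codec augmentation (this assumes the "sox_io" backend is available; the compression value and formats below are examples, not recommendations):

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 16000) * 2 - 1                  # (channel, time), in [-1, 1]
>>> mp3_augmented = F.apply_codec(waveform, sample_rate=16000, format="mp3", compression=-4.5)
>>> mulaw_augmented = F.apply_codec(waveform, sample_rate=16000, format="wav",
...                                 encoding="ULAW", bits_per_sample=8)
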

resample

+
+
+torchaudio.functional.resample(waveform: torch.Tensor, orig_freq: float, new_freq: float, lowpass_filter_width: int = 6, rolloff: float = 0.99, resampling_method: str = 'sinc_interpolation', beta: Optional[float] = None) → torch.Tensor[source]
+

Resamples the waveform at the new frequency using bandlimited interpolation.

+

https://ccrma.stanford.edu/~jos/resample/Theory_Ideal_Bandlimited_Interpolation.html

+
+

Note

+

transforms.Resample precomputes and reuses the resampling kernel, so using it will result in +more efficient computation if resampling multiple waveforms with the same resampling parameters.

+
+
+
Parameters
+
    +
  • waveform (Tensor) – The input signal of dimension (…, time)

  • +
  • orig_freq (float) – The original frequency of the signal

  • +
  • new_freq (float) – The desired frequency

  • +
  • lowpass_filter_width (int, optional) – Controls the sharpness of the filter, more == sharper +but less efficient. (Default: 6)

  • +
  • rolloff (float, optional) – The roll-off frequency of the filter, as a fraction of the Nyquist. +Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: 0.99)

  • +
  • resampling_method (str, optional) – The resampling method to use. +Options: [sinc_interpolation, kaiser_window] (Default: 'sinc_interpolation')

  • +
  • beta (float or None) – The shape parameter used for kaiser window.

  • +
+
+
Returns
+

The waveform at the new frequency of dimension (…, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+
+
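
A minimal sketch downsampling one second of audio from 44.1 kHz to 16 kHz (random noise stands in for real audio):

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 44100) * 2 - 1
>>> resampled = F.resample(waveform, orig_freq=44100, new_freq=16000)
>>> resampled.shape
torch.Size([1, 16000])
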
+

Complex Utility

+

Utilities for pseudo complex tensors. This is not for the native complex dtypes (such as torch.complex64), but for real-valued tensors that have an extra dimension at the end for the real and imaginary parts.

+
+

angle

+
+
+torchaudio.functional.angle(complex_tensor: torch.Tensor) → torch.Tensor[source]
+

Compute the angle of complex tensor input.

+
+
Parameters
+

complex_tensor (Tensor) – Tensor shape of (…, complex=2)

+
+
Returns
+

Angle of a complex tensor. Shape of (…, )

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

complex_norm

+
+
+torchaudio.functional.complex_norm(complex_tensor: torch.Tensor, power: float = 1.0) → torch.Tensor[source]
+

Compute the norm of complex tensor input.

+
+
Parameters
+
    +
  • complex_tensor (Tensor) – Tensor shape of (…, complex=2)

  • +
  • power (float) – Power of the norm. (Default: 1.0).

  • +
+
+
Returns
+

Power of the normed input tensor. Shape of (…, )

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

magphase

+
+
+torchaudio.functional.magphase(complex_tensor: torch.Tensor, power: float = 1.0) → Tuple[torch.Tensor, torch.Tensor][source]
+

Separate a complex-valued spectrogram with shape (…, 2) into its magnitude and phase.

+
+
Parameters
+
    +
  • complex_tensor (Tensor) – Tensor shape of (…, complex=2)

  • +
  • power (float) – Power of the norm. (Default: 1.0)

  • +
+
+
Returns
+

The magnitude and phase of the complex tensor

+
+
Return type
+

(Tensor, Tensor)

+
+
+
+ +
+
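
An illustrative sketch on a pseudo-complex spectrogram, i.e. a real tensor whose last dimension holds the real and imaginary parts (the shape is arbitrary):

>>> import torch
>>> import torchaudio.functional as F
>>> complex_spec = torch.randn(1, 201, 100, 2)               # (channel, freq, time, complex=2)
>>> magnitude, phase = F.magphase(complex_spec, power=1.0)
>>> magnitude.shape
torch.Size([1, 201, 100])
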
+
+

Filtering

+
+

allpass_biquad

+
+
+torchaudio.functional.allpass_biquad(waveform: torch.Tensor, sample_rate: int, central_freq: float, Q: float = 0.707) → torch.Tensor[source]
+

Design two-pole all-pass filter. Similar to SoX implementation.

+
+
Parameters
+
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

band_biquad

+
+
+torchaudio.functional.band_biquad(waveform: torch.Tensor, sample_rate: int, central_freq: float, Q: float = 0.707, noise: bool = False) → torch.Tensor[source]
+

Design two-pole band filter. Similar to SoX implementation.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • sample_rate (int) – sampling rate of the waveform, e.g. 44100 (Hz)

  • +
  • central_freq (float or torch.Tensor) – central frequency (in Hz)

  • +
  • Q (float or torch.Tensor, optional) – https://en.wikipedia.org/wiki/Q_factor (Default: 0.707).

  • +
  • noise (bool, optional) – If True, uses the alternate mode for un-pitched audio (e.g. percussion). +If False, uses mode oriented to pitched audio, i.e. voice, singing, +or instrumental music (Default: False).

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

bandpass_biquad

+
+
+torchaudio.functional.bandpass_biquad(waveform: torch.Tensor, sample_rate: int, central_freq: float, Q: float = 0.707, const_skirt_gain: bool = False) → torch.Tensor[source]
+

Design two-pole band-pass filter. Similar to SoX implementation.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • sample_rate (int) – sampling rate of the waveform, e.g. 44100 (Hz)

  • +
  • central_freq (float or torch.Tensor) – central frequency (in Hz)

  • +
  • Q (float or torch.Tensor, optional) – https://en.wikipedia.org/wiki/Q_factor (Default: 0.707)

  • +
  • const_skirt_gain (bool, optional) – If True, uses a constant skirt gain (peak gain = Q). +If False, uses a constant 0dB peak gain. (Default: False)

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

bandreject_biquad

+
+
+torchaudio.functional.bandreject_biquad(waveform: torch.Tensor, sample_rate: int, central_freq: float, Q: float = 0.707) → torch.Tensor[source]
+

Design two-pole band-reject filter. Similar to SoX implementation.

+
+
Parameters
+
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

bass_biquad

+
+
+torchaudio.functional.bass_biquad(waveform: torch.Tensor, sample_rate: int, gain: float, central_freq: float = 100, Q: float = 0.707) → torch.Tensor[source]
+

Design a bass tone-control effect. Similar to SoX implementation.

+
+
Parameters
+
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

biquad

+
+
+torchaudio.functional.biquad(waveform: torch.Tensor, b0: float, b1: float, b2: float, a0: float, a1: float, a2: float) → torch.Tensor[source]
+

Perform a biquad filter of input tensor. Initial conditions set to 0. +https://en.wikipedia.org/wiki/Digital_biquad_filter

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • b0 (float or torch.Tensor) – numerator coefficient of current input, x[n]

  • +
  • b1 (float or torch.Tensor) – numerator coefficient of input one time step ago x[n-1]

  • +
  • b2 (float or torch.Tensor) – numerator coefficient of input two time steps ago x[n-2]

  • +
  • a0 (float or torch.Tensor) – denominator coefficient of current output y[n], typically 1

  • +
  • a1 (float or torch.Tensor) – denominator coefficient of current output y[n-1]

  • +
  • a2 (float or torch.Tensor) – denominator coefficient of current output y[n-2]

  • +
+
+
Returns
+

Waveform with dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

contrast

+
+
+torchaudio.functional.contrast(waveform: torch.Tensor, enhancement_amount: float = 75.0) → torch.Tensor[source]
+

Apply contrast effect. Similar to SoX implementation. Comparable with compression, this effect modifies an audio signal to make it sound louder.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • enhancement_amount (float) – controls the amount of the enhancement. Allowed range of values for enhancement_amount: 0-100. Note that enhancement_amount = 0 still gives a significant contrast enhancement.

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

dcshift

+
+
+torchaudio.functional.dcshift(waveform: torch.Tensor, shift: float, limiter_gain: Optional[float] = None) → torch.Tensor[source]
+

Apply a DC shift to the audio. Similar to SoX implementation. +This can be useful to remove a DC offset +(caused perhaps by a hardware problem in the recording chain) from the audio

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • shift (float) – indicates the amount to shift the audio. Allowed range of values for shift: -2.0 to +2.0.

  • +
  • limiter_gain (float) – It is used only on peaks to prevent clipping. It should have a value much less than 1 (e.g. 0.05 or 0.02).

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

deemph_biquad

+
+
+torchaudio.functional.deemph_biquad(waveform: torch.Tensor, sample_rate: int) → torch.Tensor[source]
+

Apply ISO 908 CD de-emphasis (shelving) IIR filter. Similar to SoX implementation.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • sample_rate (int) – sampling rate of the waveform, Allowed sample rate 44100 or 48000

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

dither

+
+
+torchaudio.functional.dither(waveform: torch.Tensor, density_function: str = 'TPDF', noise_shaping: bool = False) → torch.Tensor[source]
+

Dither increases the perceived dynamic range of audio stored at a +particular bit-depth by eliminating nonlinear truncation distortion +(i.e. adding minimally perceived noise to mask distortion caused by quantization).

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of dimension (…, time)

  • +
  • density_function (str, optional) – The density function of a continuous random variable. One of +"TPDF" (Triangular Probability Density Function), +"RPDF" (Rectangular Probability Density Function) or +"GPDF" (Gaussian Probability Density Function) (Default: "TPDF").

  • +
  • noise_shaping (bool, optional) – a filtering process that shapes the spectral +energy of quantisation error (Default: False)

  • +
+
+
Returns
+

waveform dithered

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

equalizer_biquad

+
+
+torchaudio.functional.equalizer_biquad(waveform: torch.Tensor, sample_rate: int, center_freq: float, gain: float, Q: float = 0.707) → torch.Tensor[source]
+

Design biquad peaking equalizer filter and perform filtering. Similar to SoX implementation.

+
+
Parameters
+
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

flanger

+
+
+torchaudio.functional.flanger(waveform: torch.Tensor, sample_rate: int, delay: float = 0.0, depth: float = 2.0, regen: float = 0.0, width: float = 71.0, speed: float = 0.5, phase: float = 25.0, modulation: str = 'sinusoidal', interpolation: str = 'linear') → torch.Tensor[source]
+

Apply a flanger effect to the audio. Similar to SoX implementation.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, channel, time) . +Max 4 channels allowed

  • +
  • sample_rate (int) – sampling rate of the waveform, e.g. 44100 (Hz)

  • +
  • delay (float) – desired delay in milliseconds (ms). Allowed range of values is 0 to 30.

  • +
  • depth (float) – desired delay depth in milliseconds (ms). Allowed range of values is 0 to 10.

  • +
  • regen (float) – desired regen (feedback gain) in dB. Allowed range of values is -95 to 95.

  • +
  • width (float) – desired width (delay gain) in dB. Allowed range of values is 0 to 100.

  • +
  • speed (float) – modulation speed in Hz. Allowed range of values is 0.1 to 10.

  • +
  • phase (float) – percentage phase-shift for multi-channel. Allowed range of values is 0 to 100.

  • +
  • modulation (str) – Use either “sinusoidal” or “triangular” modulation. (Default: sinusoidal)

  • +
  • interpolation (str) – Use either “linear” or “quadratic” for delay-line interpolation. (Default: linear)

  • +
+
+
Returns
+

Waveform of dimension of (…, channel, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

gain

+
+
+torchaudio.functional.gain(waveform: torch.Tensor, gain_db: float = 1.0) → torch.Tensor[source]
+

Apply amplification or attenuation to the whole waveform.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of dimension (…, time).

  • +
  • gain_db (float, optional) – Gain adjustment in decibels (dB). (Default: 1.0)

  • +
+
+
Returns
+

the whole waveform amplified by gain_db.

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

highpass_biquad

+
+
+torchaudio.functional.highpass_biquad(waveform: torch.Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707) → torch.Tensor[source]
+

Design biquad highpass filter and perform filtering. Similar to SoX implementation.

+
+
Parameters
+
+
+
Returns
+

Waveform dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

lfilter

+
+
+torchaudio.functional.lfilter(waveform: torch.Tensor, a_coeffs: torch.Tensor, b_coeffs: torch.Tensor, clamp: bool = True) → torch.Tensor[source]
+

Perform an IIR filter by evaluating difference equation.

+
+

Note

+

To avoid numerical problems, small filter order is preferred. +Using double precision could also minimize numerical precision errors.

+
+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (..., time). Must be normalized to -1 to 1.

  • +
  • a_coeffs (Tensor) – denominator coefficients of difference equation of dimension of (n_order + 1). +Lower delays coefficients are first, e.g. [a0, a1, a2, ...]. +Must be same size as b_coeffs (pad with 0’s as necessary).

  • +
  • b_coeffs (Tensor) – numerator coefficients of difference equation of dimension of (n_order + 1). +Lower delays coefficients are first, e.g. [b0, b1, b2, ...]. +Must be same size as a_coeffs (pad with 0’s as necessary).

  • +
  • clamp (bool, optional) – If True, clamp the output signal to be in the range [-1, 1] (Default: True)

  • +
+
+
Returns
+

Waveform with dimension of (..., time).

+
+
Return type
+

Tensor

+
+
+
+ +
+
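
A hedged sketch applying a simple two-tap averaging filter through the difference equation (the coefficients are illustrative; note that a_coeffs precedes b_coeffs in the signature):

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 16000) * 2 - 1                  # normalized to [-1, 1]
>>> a_coeffs = torch.tensor([1.0, 0.0])                      # denominator coefficients
>>> b_coeffs = torch.tensor([0.5, 0.5])                      # y[n] = 0.5 * x[n] + 0.5 * x[n-1]
>>> filtered = F.lfilter(waveform, a_coeffs, b_coeffs)
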
+

lowpass_biquad

+
+
+torchaudio.functional.lowpass_biquad(waveform: torch.Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707) → torch.Tensor[source]
+

Design biquad lowpass filter and perform filtering. Similar to SoX implementation.

+
+
Parameters
+
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
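
A short sketch chaining highpass_biquad and lowpass_biquad into a crude band-pass (cutoff frequencies are arbitrary):

>>> import torch
>>> import torchaudio.functional as F
>>> waveform = torch.rand(1, 16000) * 2 - 1
>>> band = F.highpass_biquad(waveform, sample_rate=16000, cutoff_freq=300.0)
>>> band = F.lowpass_biquad(band, sample_rate=16000, cutoff_freq=3000.0)
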
+

overdrive

+
+
+torchaudio.functional.overdrive(waveform: torch.Tensor, gain: float = 20, colour: float = 20) → torch.Tensor[source]
+

Apply an overdrive effect to the audio. Similar to SoX implementation. This effect applies a non-linear distortion to the audio signal.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • gain (float) – desired gain at the boost (or attenuation) in dB. Allowed range of values is 0 to 100.

  • +
  • colour (float) – controls the amount of even harmonic content in the over-driven output. Allowed range of values is 0 to 100.

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

phaser

+
+
+torchaudio.functional.phaser(waveform: torch.Tensor, sample_rate: int, gain_in: float = 0.4, gain_out: float = 0.74, delay_ms: float = 3.0, decay: float = 0.4, mod_speed: float = 0.5, sinusoidal: bool = True) → torch.Tensor[source]
+

Apply a phasing effect to the audio. Similar to SoX implementation.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • sample_rate (int) – sampling rate of the waveform, e.g. 44100 (Hz)

  • +
  • gain_in (float) – desired input gain at the boost (or attenuation) in dB. Allowed range of values is 0 to 1.

  • +
  • gain_out (float) – desired output gain at the boost (or attenuation) in dB. Allowed range of values is 0 to 1e9.

  • +
  • delay_ms (float) – desired delay in milliseconds. Allowed range of values is 0 to 5.0.

  • +
  • decay (float) – desired decay relative to gain-in. Allowed range of values is 0 to 0.99.

  • +
  • mod_speed (float) – modulation speed in Hz. Allowed range of values is 0.1 to 2.

  • +
  • sinusoidal (bool) – If True, uses sinusoidal modulation (preferable for multiple instruments) +If False, uses triangular modulation (gives single instruments a sharper phasing effect) +(Default: True)

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

riaa_biquad

+
+
+torchaudio.functional.riaa_biquad(waveform: torch.Tensor, sample_rate: int) → torch.Tensor[source]
+

Apply RIAA vinyl playback equalization. Similar to SoX implementation.

+
+
Parameters
+
    +
  • waveform (Tensor) – audio waveform of dimension of (…, time)

  • +
  • sample_rate (int) – sampling rate of the waveform, e.g. 44100 (Hz). Allowed sample rates in Hz: 44100, 48000, 88200, 96000.

  • +
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

treble_biquad

+
+
+torchaudio.functional.treble_biquad(waveform: torch.Tensor, sample_rate: int, gain: float, central_freq: float = 3000, Q: float = 0.707) → torch.Tensor[source]
+

Design a treble tone-control effect. Similar to SoX implementation.

+
+
Parameters
+
+
+
Returns
+

Waveform of dimension of (…, time)

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
+

vad

+
+
+
+

+
+
+torchaudio.functional.vad(waveform: torch.Tensor, sample_rate: int, trigger_level: float = 7.0, trigger_time: float = 0.25, search_time: float = 1.0, allowed_gap: float = 0.25, pre_trigger_time: float = 0.0, boot_time: float = 0.35, noise_up_time: float = 0.1, noise_down_time: float = 0.01, noise_reduction_amount: float = 1.35, measure_freq: float = 20.0, measure_duration: Optional[float] = None, measure_smooth_time: float = 0.4, hp_filter_freq: float = 50.0, lp_filter_freq: float = 6000.0, hp_lifter_freq: float = 150.0, lp_lifter_freq: float = 2000.0) → torch.Tensor[source]
+

Voice Activity Detector. Similar to SoX implementation. +Attempts to trim silence and quiet background sounds from the ends of recordings of speech. +The algorithm currently uses a simple cepstral power measurement to detect voice, +so may be fooled by other things, especially music.

+

The effect can trim only from the front of the audio, +so in order to trim from the back, the reverse effect must also be used.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of dimension (channels, time) or (time) +Tensor of shape (channels, time) is treated as a multi-channel recording +of the same event and the resulting output will be trimmed to the earliest +voice activity in any channel.

  • +
  • sample_rate (int) – Sample rate of audio signal.

  • +
  • trigger_level (float, optional) – The measurement level used to trigger activity detection. This may need to be changed depending on the noise level, signal level, and other characteristics of the input audio. (Default: 7.0)

  • +
  • trigger_time (float, optional) – The time constant (in seconds) +used to help ignore short bursts of sound. (Default: 0.25)

  • +
  • search_time (float, optional) – The amount of audio (in seconds) +to search for quieter/shorter bursts of audio to include prior +to the detected trigger point. (Default: 1.0)

  • +
  • allowed_gap (float, optional) – The allowed gap (in seconds) between +quieter/shorter bursts of audio to include prior +to the detected trigger point. (Default: 0.25)

  • +
  • pre_trigger_time (float, optional) – The amount of audio (in seconds) to preserve +before the trigger point and any found quieter/shorter bursts. (Default: 0.0)

  • +
  • boot_time (float, optional) – The algorithm (internally) uses adaptive noise estimation/reduction in order to detect the start of the wanted audio. This option sets the time for the initial noise estimate. (Default: 0.35)

  • +
  • noise_up_time (float, optional) – Time constant used by the adaptive noise estimator for when the noise level is increasing. (Default: 0.1)

  • +
  • noise_down_time (float, optional) – Time constant used by the adaptive noise estimator for when the noise level is decreasing. (Default: 0.01)

  • +
  • noise_reduction_amount (float, optional) – Amount of noise reduction to use in the detection algorithm (e.g. 0, 0.5, …). (Default: 1.35)

  • +
  • measure_freq (float, optional) – Frequency of the algorithm's processing/measurements. (Default: 20.0)

  • +
  • measure_duration (float, optional) – Measurement duration. (Default: twice the measurement period, i.e. with overlap)

  • +
  • measure_smooth_time (float, optional) – Time constant used to smooth spectral measurements. (Default: 0.4)

  • +
  • hp_filter_freq (float, optional) – Frequency of the high-pass filter applied at the input to the detector algorithm. (Default: 50.0)

  • +
  • lp_filter_freq (float, optional) – Frequency of the low-pass filter applied at the input to the detector algorithm. (Default: 6000.0)

  • +
  • hp_lifter_freq (float, optional) – Frequency of the high-pass lifter used in the detector algorithm. (Default: 150.0)

  • +
  • lp_lifter_freq (float, optional) – Frequency of the low-pass lifter used in the detector algorithm. (Default: 2000.0)

  • +
+
+
Returns
+

Tensor of audio of dimension (…, time).

+
+
Return type
+

Tensor

+
+
+
+
Reference:
+
+
+
+ +
+
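
A hedged sketch trimming silence from both ends by running vad on the waveform and on its time-reversed copy, as described above (the file name is a placeholder):

>>> import torch
>>> import torchaudio
>>> import torchaudio.functional as F
>>> waveform, sample_rate = torchaudio.load("speech.wav")    # placeholder path
>>> front_trimmed = F.vad(waveform, sample_rate)             # trims silence at the beginning
>>> reversed_waveform = torch.flip(front_trimmed, [-1])
>>> both_trimmed = torch.flip(F.vad(reversed_waveform, sample_rate), [-1])
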

Feature Extractions

spectrogram

+
+
+torchaudio.functional.spectrogram(waveform: torch.Tensor, pad: int, window: torch.Tensor, n_fft: int, hop_length: int, win_length: int, power: Optional[float], normalized: bool, center: bool = True, pad_mode: str = 'reflect', onesided: bool = True, return_complex: bool = False) → torch.Tensor[source]
+

Create a spectrogram or a batch of spectrograms from a raw audio signal. +The spectrogram can be either magnitude-only or complex.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of dimension (…, time)

  • +
  • pad (int) – Two sided padding of signal

  • +
  • window (Tensor) – Window tensor that is applied/multiplied to each frame/window

  • +
  • n_fft (int) – Size of FFT

  • +
  • hop_length (int) – Length of hop between STFT windows

  • +
  • win_length (int) – Window size

  • +
  • power (float or None) – Exponent for the magnitude spectrogram, +(must be > 0) e.g., 1 for energy, 2 for power, etc. +If None, then the complex spectrum is returned instead.

  • +
  • normalized (bool) – Whether to normalize by magnitude after stft

  • +
  • center (bool, optional) – whether to pad waveform on both sides so +that the \(t\)-th frame is centered at time \(t \times \text{hop\_length}\). +Default: True

  • +
  • pad_mode (string, optional) – controls the padding method used when +center is True. Default: "reflect"

  • +
  • onesided (bool, optional) – controls whether to return half of results to +avoid redundancy. Default: True

  • +
  • return_complex (bool, optional) – Indicates whether the resulting complex-valued Tensor should be represented with +native complex dtype, such as torch.cfloat and torch.cdouble, or real dtype +mimicking complex value with an extra dimension for real and imaginary parts. +This argument is only effective when power=None. +See also torch.view_as_real.

  • +
+
+
Returns
+

Dimension (…, freq, time), freq is +n_fft // 2 + 1 and n_fft is the number of +Fourier bins, and time is the number of window hops (n_frame).

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

griffinlim

+
+
+torchaudio.functional.griffinlim(specgram: torch.Tensor, window: torch.Tensor, n_fft: int, hop_length: int, win_length: int, power: float, n_iter: int, momentum: float, length: Optional[int], rand_init: bool) → torch.Tensor[source]
+

Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.

+

Implementation ported from +librosa [1], A fast Griffin-Lim algorithm [2] +and Signal estimation from modified short-time Fourier transform [3].

+
+
Parameters
+
    +
  • specgram (Tensor) – A magnitude-only STFT spectrogram of dimension (…, freq, frames) +where freq is n_fft // 2 + 1.

  • +
  • window (Tensor) – Window tensor that is applied/multiplied to each frame/window

  • +
  • n_fft (int) – Size of FFT, creates n_fft // 2 + 1 bins

  • +
  • hop_length (int) – Length of hop between STFT windows. (Default: win_length // 2)

  • +
  • win_length (int) – Window size. (Default: n_fft)

  • +
  • power (float) – Exponent for the magnitude spectrogram, +(must be > 0) e.g., 1 for energy, 2 for power, etc.

  • +
  • n_iter (int) – Number of iteration for phase recovery process.

  • +
  • momentum (float) – The momentum parameter for fast Griffin-Lim. +Setting this to 0 recovers the original Griffin-Lim method. +Values near 1 can lead to faster convergence, but above 1 may not converge.

  • +
  • length (int or None) – Array length of the expected output.

  • +
  • rand_init (bool) – Initializes phase randomly if True, to zero otherwise.

  • +
+
+
Returns
+

waveform of (…, time), where time equals the length parameter if given.

+
+
Return type
+

torch.Tensor

+
+
+
+ +
+
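
A hedged round-trip sketch: compute a magnitude spectrogram with spectrogram above, then estimate a waveform from it with griffinlim (the STFT parameters and iteration count are illustrative; random noise stands in for audio):

>>> import torch
>>> import torchaudio.functional as F
>>> n_fft, hop_length, win_length = 400, 200, 400
>>> window = torch.hann_window(win_length)
>>> waveform = torch.rand(1, 16000) * 2 - 1
>>> spec = F.spectrogram(waveform, pad=0, window=window, n_fft=n_fft,
...                      hop_length=hop_length, win_length=win_length,
...                      power=2.0, normalized=False)
>>> recovered = F.griffinlim(spec, window, n_fft, hop_length, win_length,
...                          power=2.0, n_iter=32, momentum=0.99,
...                          length=waveform.size(-1), rand_init=True)
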
+

phase_vocoder

+
+
+torchaudio.functional.phase_vocoder(complex_specgrams: torch.Tensor, rate: float, phase_advance: torch.Tensor) → torch.Tensor[source]
+

Given a STFT tensor, speed up in time without modifying pitch by a +factor of rate.

+
+
Parameters
+
    +
  • complex_specgrams (Tensor) – Either a real tensor of dimension of (..., freq, num_frame, complex=2) +or a tensor of dimension (..., freq, num_frame) with complex dtype.

  • +
  • rate (float) – Speed-up factor

  • +
  • phase_advance (Tensor) – Expected phase advance in each bin. Dimension of (freq, 1)

  • +
+
+
Returns
+

Stretched spectrogram. The resulting tensor is of the same dtype as the input +spectrogram, but the number of frames is changed to ceil(num_frame / rate).

+
+
Return type
+

Tensor

+
+
+
+
Example - With Tensor of complex dtype
>>> freq, hop_length = 1025, 512
+>>> # (channel, freq, time)
+>>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat)
+>>> rate = 1.3 # Speed up by 30%
+>>> phase_advance = torch.linspace(
+>>>    0, math.pi * hop_length, freq)[..., None]
+>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
+>>> x.shape # with 231 == ceil(300 / 1.3)
+torch.Size([2, 1025, 231])
+
+
+
+
Example - With Tensor of real dtype and extra dimension for complex field
>>> freq, hop_length = 1025, 512
+>>> # (channel, freq, time, complex=2)
+>>> complex_specgrams = torch.randn(2, freq, 300, 2)
+>>> rate = 1.3 # Speed up by 30%
+>>> phase_advance = torch.linspace(
+>>>    0, math.pi * hop_length, freq)[..., None]
+>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
+>>> x.shape # with 231 == ceil(300 / 1.3)
+torch.Size([2, 1025, 231, 2])
+
+
+
+
+
+ +
+
+

compute_deltas

+
+
+torchaudio.functional.compute_deltas(specgram: torch.Tensor, win_length: int = 5, mode: str = 'replicate') → torch.Tensor[source]
+

Compute delta coefficients of a tensor, usually a spectrogram:

+
+\[d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2} + +\]
+

where \(d_t\) is the deltas at time \(t\), \(c_t\) is the spectrogram coefficients at time \(t\), and \(N\) is (win_length-1)//2.

+
+
Parameters
+
    +
  • specgram (Tensor) – Tensor of audio of dimension (…, freq, time)

  • +
  • win_length (int, optional) – The window length used for computing delta (Default: 5)

  • +
  • mode (str, optional) – Mode parameter passed to padding (Default: "replicate")

  • +
+
+
Returns
+

Tensor of deltas of dimension (…, freq, time)

+
+
Return type
+

Tensor

+
+
+
+
Example
>>> specgram = torch.randn(1, 40, 1000)
+>>> delta = compute_deltas(specgram)
+>>> delta2 = compute_deltas(delta)
+
+
+
+
+
+ +
+
+

detect_pitch_frequency

+
+
+torchaudio.functional.detect_pitch_frequency(waveform: torch.Tensor, sample_rate: int, frame_time: float = 0.01, win_length: int = 30, freq_low: int = 85, freq_high: int = 3400) → torch.Tensor[source]
+

Detect pitch frequency.

+

It is implemented using a normalized cross-correlation function and median smoothing.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of dimension (…, freq, time)

  • +
  • sample_rate (int) – The sample rate of the waveform (Hz)

  • +
  • frame_time (float, optional) – Duration of a frame (Default: 10 ** (-2)).

  • +
  • win_length (int, optional) – The window length for median smoothing (in number of frames) (Default: 30).

  • +
  • freq_low (int, optional) – Lowest frequency that can be detected (Hz) (Default: 85).

  • +
  • freq_high (int, optional) – Highest frequency that can be detected (Hz) (Default: 3400).

  • +
+
+
Returns
+

Tensor of freq of dimension (…, frame)

+
+
Return type
+

Tensor

+
+
+
+ +
+
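
A brief illustrative sketch (the file name is a placeholder; meaningful pitch values require actual speech):

>>> import torchaudio
>>> import torchaudio.functional as F
>>> waveform, sample_rate = torchaudio.load("speech.wav")    # placeholder path
>>> pitch = F.detect_pitch_frequency(waveform, sample_rate)  # (..., frame), pitch in Hz
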
+

sliding_window_cmn

+
+
+torchaudio.functional.sliding_window_cmn(specgram: torch.Tensor, cmn_window: int = 600, min_cmn_window: int = 100, center: bool = False, norm_vars: bool = False) → torch.Tensor[source]
+

Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.

+
+
Parameters
+
    +
  • specgram (Tensor) – Tensor of audio of dimension (…, time, freq)

  • +
  • cmn_window (int, optional) – Window in frames for running average CMN computation (int, default = 600)

  • +
  • min_cmn_window (int, optional) – Minimum CMN window used at start of decoding (adds latency only at start). +Only applicable if center == false, ignored if center==true (int, default = 100)

  • +
  • center (bool, optional) – If true, use a window centered on the current frame +(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)

  • +
  • norm_vars (bool, optional) – If true, normalize variance to one. (bool, default = false)

  • +
+
+
Returns
+

Tensor matching input shape (…, freq, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
+

compute_kaldi_pitch

+
+
+torchaudio.functional.compute_kaldi_pitch(waveform: torch.Tensor, sample_rate: float, frame_length: float = 25.0, frame_shift: float = 10.0, min_f0: float = 50, max_f0: float = 400, soft_min_f0: float = 10.0, penalty_factor: float = 0.1, lowpass_cutoff: float = 1000, resample_frequency: float = 4000, delta_pitch: float = 0.005, nccf_ballast: float = 7000, lowpass_filter_width: int = 1, upsample_filter_width: int = 5, max_frames_latency: int = 0, frames_per_chunk: int = 0, simulate_first_pass_online: bool = False, recompute_frame: int = 500, snip_edges: bool = True) → torch.Tensor[source]
+

Extract pitch based on method described in A pitch extraction algorithm tuned +for automatic speech recognition [4].

+

This function computes the equivalent of compute-kaldi-pitch-feats from Kaldi.

+
+
Parameters
+
    +
  • waveform (Tensor) – The input waveform of shape (…, time).

  • +
  • sample_rate (float) – Sample rate of waveform.

  • +
  • frame_length (float, optional) – Frame length in milliseconds. (default: 25.0)

  • +
  • frame_shift (float, optional) – Frame shift in milliseconds. (default: 10.0)

  • +
  • min_f0 (float, optional) – Minimum F0 to search for (Hz) (default: 50.0)

  • +
  • max_f0 (float, optional) – Maximum F0 to search for (Hz) (default: 400.0)

  • +
  • soft_min_f0 (float, optional) – Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)

  • +
  • penalty_factor (float, optional) – Cost factor for FO change. (default: 0.1)

  • +
  • lowpass_cutoff (float, optional) – Cutoff frequency for LowPass filter (Hz) (default: 1000)

  • +
  • resample_frequency (float, optional) – Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff. +(default: 4000)

  • +
  • delta_pitch (float, optional) – Smallest relative change in pitch that our algorithm measures. (default: 0.005)

  • +
  • nccf_ballast (float, optional) – Increasing this factor reduces NCCF for quiet frames (default: 7000)

  • +
  • lowpass_filter_width (int, optional) – Integer that determines filter width of lowpass filter, more gives sharper filter. +(default: 1)

  • +
  • upsample_filter_width (int, optional) – Integer that determines filter width when upsampling NCCF. (default: 5)

  • +
  • max_frames_latency (int, optional) – Maximum number of frames of latency that we allow pitch tracking to introduce into +the feature processing (affects output only if frames_per_chunk > 0 and +simulate_first_pass_online=True) (default: 0)

  • +
  • frames_per_chunk (int, optional) – The number of frames used for energy normalization. (default: 0)

  • +
  • simulate_first_pass_online (bool, optional) – If true, the function will output features that correspond to what an online decoder +would see in the first pass of decoding – not the final version of the features, +which is the default. (default: False) +Relevant if frames_per_chunk > 0.

  • +
  • recompute_frame (int, optional) – Only relevant for compatibility with online pitch extraction. +A non-critical parameter; the frame at which we recompute some of the forward pointers, +after revising our estimate of the signal energy. +Relevant if frames_per_chunk > 0. (default: 500)

  • +
  • snip_edges (bool, optional) – If this is set to false, the incomplete frames near the ending edge won’t be snipped, +so that the number of frames is the file size divided by the frame-shift. +This makes different types of features give the same number of frames. (default: True)

  • +
+
+
Returns
+

Pitch feature. Shape: (batch, frames, 2) where the last dimension corresponds to pitch and NCCF.

+
+
Return type
+

Tensor

+
+
+
+ +
+
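
A hedged sketch (this assumes torchaudio was built with the Kaldi-pitch extension; the file name is a placeholder):

>>> import torchaudio
>>> import torchaudio.functional as F
>>> waveform, sample_rate = torchaudio.load("speech.wav")    # placeholder path
>>> pitch_feature = F.compute_kaldi_pitch(waveform, sample_rate)
>>> # per the description above, the last dimension holds pitch and NCCF
>>> pitch, nccf = pitch_feature[..., 0], pitch_feature[..., 1]
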
+

spectral_centroid

+
+
+torchaudio.functional.spectral_centroid(waveform: torch.Tensor, sample_rate: int, pad: int, window: torch.Tensor, n_fft: int, hop_length: int, win_length: int) → torch.Tensor[source]
+

Compute the spectral centroid for each channel along the time axis.

+

The spectral centroid is defined as the weighted average of the +frequency values, weighted by their magnitude.

+
+
Parameters
+
    +
  • waveform (Tensor) – Tensor of audio of dimension (…, time)

  • +
  • sample_rate (int) – Sample rate of the audio waveform

  • +
  • pad (int) – Two sided padding of signal

  • +
  • window (Tensor) – Window tensor that is applied/multiplied to each frame/window

  • +
  • n_fft (int) – Size of FFT

  • +
  • hop_length (int) – Length of hop between STFT windows

  • +
  • win_length (int) – Window size

  • +
+
+
Returns
+

Dimension (…, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+
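
A hedged sketch computing the centroid with an explicit Hann window (the STFT parameters are illustrative):

>>> import torch
>>> import torchaudio.functional as F
>>> n_fft, hop_length, win_length = 400, 200, 400
>>> window = torch.hann_window(win_length)
>>> waveform = torch.rand(1, 16000) * 2 - 1
>>> centroid = F.spectral_centroid(waveform, sample_rate=16000, pad=0, window=window,
...                                n_fft=n_fft, hop_length=hop_length, win_length=win_length)
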
+
+

References

+

+
1
+

Brian McFee, Colin Raffel, Dawen Liang, Daniel P.W. Ellis, Matt McVicar, Eric Battenberg, and Oriol Nieto. Librosa: Audio and Music Signal Analysis in Python. In Kathryn Huff and James Bergstra, editors, Proceedings of the 14th Python in Science Conference, 18 – 24. 2015. doi:10.25080/Majora-7b98e3ed-003.

+
+
2
+

Nathanaël Perraudin, Peter Balazs, and Peter L. Søndergaard. A fast griffin-lim algorithm. In 2013 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, volume, 1–4. 2013. doi:10.1109/WASPAA.2013.6701851.

+
+
3
+

D. Griffin and Jae Lim. Signal estimation from modified short-time fourier transform. In ICASSP ‘83. IEEE International Conference on Acoustics, Speech, and Signal Processing, volume 8, 804–807. 1983. doi:10.1109/ICASSP.1983.1172092.

+
+
4
+

Pegah Ghahremani, Bagher BabaAli, Daniel Povey, Korbinian Riedhammer, Jan Trmal, and Sanjeev Khudanpur. A pitch extraction algorithm tuned for automatic speech recognition. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), volume, 2494–2498. 2014. doi:10.1109/ICASSP.2014.6854049.

+
+
+

+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/genindex.html b/0.9.0/genindex.html new file mode 100644 index 0000000000..b7642b2c0d --- /dev/null +++ b/0.9.0/genindex.html @@ -0,0 +1,1125 @@ + + + + + + + + + + + + + Index — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Index

+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/index.html b/0.9.0/index.html new file mode 100644 index 0000000000..7ebf90a8e3 --- /dev/null +++ b/0.9.0/index.html @@ -0,0 +1,763 @@ + + + + + + + + + + + + + torchaudio — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio

+

This library is part of the PyTorch project. PyTorch is an open source +machine learning framework.

+

Features described in this documentation are classified by release status:

+
+

Stable: These features will be maintained long-term and there should generally +be no major performance limitations or gaps in documentation. +We also expect to maintain backwards compatibility (although +breaking changes can happen and notice will be given one release ahead +of time).

+

Beta: Features are tagged as Beta because the API may change based on +user feedback, because the performance needs to improve, or because +coverage across operators is not yet complete. For Beta features, we are +committing to seeing the feature through to the Stable classification. +We are not, however, committing to backwards compatibility.

+

Prototype: These features are typically not available as part of +binary distributions like PyPI or Conda, except sometimes behind run-time +flags, and are at an early stage for feedback and testing.

+
+

The torchaudio package consists of I/O, popular datasets and common audio transformations.

+ + +
+ + +
+ +
+
+ + + + + + +
+ + + +
+

+ + +
+ +
+
+ +
+
+
+ + +
+
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/kaldi_io.html b/0.9.0/kaldi_io.html new file mode 100644 index 0000000000..98da849e11 --- /dev/null +++ b/0.9.0/kaldi_io.html @@ -0,0 +1,798 @@ + + + + + + + + + + + + + torchaudio.kaldi_io — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.kaldi_io

+

To use this module, the dependency kaldi_io needs to be installed. +This is a light wrapper around kaldi_io that returns torch.Tensor.

+
+

Vectors

+
+

read_vec_int_ark

+
+
+torchaudio.kaldi_io.read_vec_int_ark(file_or_fd: Any) → Iterable[Tuple[str, torch.Tensor]][source]
+

Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.

+
+
Parameters
+

file_or_fd (str/FileDescriptor) – ark, gzipped ark, pipe or opened file descriptor

+
+
Returns
+

The string is the key and the tensor is the vector read from file

+
+
Return type
+

Iterable[Tuple[str, Tensor]]

+
+
+
+
Example
>>> # read ark to a 'dictionary'
+>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_int_ark(file) }
+
+
+
+
+
+ +
+
+

read_vec_flt_scp

+
+
+torchaudio.kaldi_io.read_vec_flt_scp(file_or_fd: Any) → Iterable[Tuple[str, torch.Tensor]][source]
+

Create generator of (key,vector<float32/float64>) tuples, read according to Kaldi scp.

+
+
Parameters
+

file_or_fd (str/FileDescriptor) – scp, gzipped scp, pipe or opened file descriptor

+
+
Returns
+

The string is the key and the tensor is the vector read from file

+
+
Return type
+

Iterable[Tuple[str, Tensor]]

+
+
+
+
Example
>>> # read scp to a 'dictionary'
+>>> # d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_scp(file) }
+
+
+
+
+
+ +
+
+

read_vec_flt_ark

+
+
+torchaudio.kaldi_io.read_vec_flt_ark(file_or_fd: Any) → Iterable[Tuple[str, torch.Tensor]][source]
+

Create generator of (key,vector<float32/float64>) tuples, which reads from the ark file/stream.

+
+
Parameters
+

file_or_fd (str/FileDescriptor) – ark, gzipped ark, pipe or opened file descriptor

+
+
Returns
+

The string is the key and the tensor is the vector read from file

+
+
Return type
+

Iterable[Tuple[str, Tensor]]

+
+
+
+
Example
>>> # read ark to a 'dictionary'
+>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_ark(file) }
+
+
+
+
+
+ +
+
+
+

Matrices

+
+

read_mat_scp

+
+
+torchaudio.kaldi_io.read_mat_scp(file_or_fd: Any) → Iterable[Tuple[str, torch.Tensor]][source]
+

Create generator of (key,matrix<float32/float64>) tuples, read according to Kaldi scp.

+
+
Parameters
+

file_or_fd (str/FileDescriptor) – scp, gzipped scp, pipe or opened file descriptor

+
+
Returns
+

The string is the key and the tensor is the matrix read from file

+
+
Return type
+

Iterable[Tuple[str, Tensor]]

+
+
+
+
Example
>>> # read scp to a 'dictionary'
+>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_scp(file) }
+
+
+
+
+
+ +
+
+

read_mat_ark

+
+
+torchaudio.kaldi_io.read_mat_ark(file_or_fd: Any) → Iterable[Tuple[str, torch.Tensor]][source]
+

Create generator of (key,matrix<float32/float64>) tuples, which reads from the ark file/stream.

+
+
Parameters
+

file_or_fd (str/FileDescriptor) – ark, gzipped ark, pipe or opened file descriptor

+
+
Returns
+

The string is the key and the tensor is the matrix read from file

+
+
Return type
+

Iterable[Tuple[str, Tensor]]

+
+
+
+
Example
>>> # read ark to a 'dictionary'
+>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_ark(file) }
+
+
+
+
+
+ +
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/models.html b/0.9.0/models.html new file mode 100644 index 0000000000..8da9f1803f --- /dev/null +++ b/0.9.0/models.html @@ -0,0 +1,1155 @@ + + + + + + + + + + + + + torchaudio.models — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.models

+

The models subpackage contains definitions of models for addressing common audio tasks.

+
+

ConvTasNet

+
+
+class torchaudio.models.ConvTasNet(num_sources: int = 2, enc_kernel_size: int = 16, enc_num_feats: int = 512, msk_kernel_size: int = 3, msk_num_feats: int = 128, msk_num_hidden_feats: int = 512, msk_num_layers: int = 8, msk_num_stacks: int = 3)[source]
+

Conv-TasNet: a fully-convolutional time-domain audio separation network, introduced in +Conv-TasNet: Surpassing Ideal Time–Frequency Magnitude Masking for Speech Separation +[1].

+
+
Parameters
+
    +
  • num_sources (int) – The number of sources to split.

  • +
  • enc_kernel_size (int) – The convolution kernel size of the encoder/decoder, <L>.

  • +
  • enc_num_feats (int) – The feature dimensions passed to mask generator, <N>.

  • +
  • msk_kernel_size (int) – The convolution kernel size of the mask generator, <P>.

  • +
  • msk_num_feats (int) – The input/output feature dimension of conv block in the mask generator, <B, Sc>.

  • +
  • msk_num_hidden_feats (int) – The internal feature dimension of conv block of the mask generator, <H>.

  • +
  • msk_num_layers (int) – The number of layers in one conv block of the mask generator, <X>.

  • +
  • msk_num_stacks (int) – The number of conv blocks of the mask generator, <R>.

  • +
+
+
+
+

Note

+

This implementation corresponds to the “non-causal” setting in the paper.

+
+
+
+forward(input: torch.Tensor) → torch.Tensor[source]
+

Perform source separation. Generate audio source waveforms.

+
+
Parameters
+

input (torch.Tensor) – 3D Tensor with shape [batch, channel==1, frames]

+
+
Returns
+

3D Tensor with shape [batch, channel==num_sources, frames]

+
+
Return type
+

torch.Tensor

+
+
+
+ +
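A brief usage sketch (illustrative only; a 1-second, 8 kHz mono mixture, shapes as documented above):
>>> import torch
>>> from torchaudio.models import ConvTasNet
>>>
>>> model = ConvTasNet(num_sources=2)
>>> mixture = torch.rand(1, 1, 8000)   # [batch, channel==1, frames]
>>> sources = model(mixture)           # [batch, num_sources, frames]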
+ +
+
+

DeepSpeech

+
+
+class torchaudio.models.DeepSpeech(n_feature: int, n_hidden: int = 2048, n_class: int = 40, dropout: float = 0.0)[source]
+

DeepSpeech model architecture from Deep Speech: Scaling up end-to-end speech recognition +[2].

+
+
Parameters
+
    +
  • n_feature – Number of input features

  • +
  • n_hidden – Internal hidden unit size.

  • +
  • n_class – Number of output classes

  • +
+
+
+
+
+forward(x: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

x (torch.Tensor) – Tensor of dimension (batch, channel, time, feature).

+
+
Returns
+

Predictor tensor of dimension (batch, time, class).

+
+
Return type
+

Tensor

+
+
+
+ +
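A brief usage sketch (illustrative only; the input sizes below are arbitrary):
>>> import torch
>>> from torchaudio.models import DeepSpeech
>>>
>>> model = DeepSpeech(n_feature=80)
>>> x = torch.rand(4, 1, 100, 80)   # (batch, channel, time, feature)
>>> out = model(x)                  # (batch, time, class); (4, 100, 40) with the default n_class=40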
+ +
+
+

Wav2Letter

+
+
+class torchaudio.models.Wav2Letter(num_classes: int = 40, input_type: str = 'waveform', num_features: int = 1)[source]
+

Wav2Letter model architecture from Wav2Letter: an End-to-End ConvNet-based Speech +Recognition System [3].

+
+

\(\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}\)

+
+
+
Parameters
+
    +
  • num_classes (int, optional) – Number of classes to be classified. (Default: 40)

  • +
  • input_type (str, optional) – Wav2Letter can use as input: waveform, power_spectrum +or mfcc (Default: waveform).

  • +
  • num_features (int, optional) – Number of input features that the network will receive (Default: 1).

  • +
+
+
+
+
+forward(x: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

x (torch.Tensor) – Tensor of dimension (batch_size, num_features, input_length).

+
+
Returns
+

Predictor tensor of dimension (batch_size, number_of_classes, input_length).

+
+
Return type
+

Tensor

+
+
+
+ +
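A brief usage sketch (illustrative only; 1 second of 16 kHz mono audio fed as a raw waveform):
>>> import torch
>>> from torchaudio.models import Wav2Letter
>>>
>>> model = Wav2Letter(num_classes=40, input_type='waveform', num_features=1)
>>> x = torch.rand(2, 1, 16000)   # (batch_size, num_features, input_length)
>>> out = model(x)                # (batch_size, num_classes, output_length)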
+ +
+
+

Wav2Vec2.0

+
+

Wav2Vec2Model

+
+
+class torchaudio.models.Wav2Vec2Model(feature_extractor: torch.nn.modules.module.Module, encoder: torch.nn.modules.module.Module)[source]
+

Encoder model used in wav2vec 2.0 [4].

+
+

Note

+

To build the model, please use one of the factory functions.

+
+
+
Parameters
+
    +
  • feature_extractor (torch.nn.Module) – Feature extractor that extracts feature vectors from raw audio Tensor.

  • +
  • encoder (torch.nn.Module) – Encoder that converts the audio features into the sequence of probability +distribution (in negative log-likelihood) over labels.

  • +
+
+
+
+
+extract_features(waveforms: torch.Tensor, lengths: Optional[torch.Tensor] = None) → Tuple[torch.Tensor, Optional[torch.Tensor]][source]
+

Extract feature vectors from raw waveforms

+
+
Parameters
+
    +
  • waveforms (Tensor) – Audio tensor of shape (batch, frames).

  • +
  • lengths (Tensor, optional) – Indicates the valid length of each audio sample in the batch. +Shape: (batch, ).

  • +
+
+
Returns
+

+
Feature vectors.

Shape: (batch, frames, feature dimension)

+
+
Tensor, optional:

Indicates the valid length of each feature in the batch, computed +based on the given lengths argument. +Shape: (batch, ).

+
+
+

+
+
Return type
+

Tuple[Tensor, Optional[Tensor]]

+
+
+
+ +
+
+forward(waveforms: torch.Tensor, lengths: Optional[torch.Tensor] = None) → Tuple[torch.Tensor, Optional[torch.Tensor]][source]
+

Compute the sequence of probability distribution over labels.

+
+
Parameters
+
    +
  • waveforms (Tensor) – Audio tensor of shape (batch, frames).

  • +
  • lengths (Tensor, optional) – Indicates the valid length of each audio sample in the batch. +Shape: (batch, ).

  • +
+
+
Returns
+

+
The sequences of probability distribution (in logit) over labels.

Shape: (batch, frames, num labels).

+
+
Tensor, optional:

Indicates the valid length of each feature in the batch, computed +based on the given lengths argument. +Shape: (batch, ).

+
+
+

+
+
Return type
+

Tuple[Tensor, Optional[Tensor]]

+
+
+
+ +
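A brief usage sketch (illustrative only; the model is built with one of the factory functions described below):
>>> import torch
>>> from torchaudio.models import wav2vec2_base
>>>
>>> model = wav2vec2_base(num_out=32)
>>> waveforms = torch.randn(2, 16000)            # (batch, frames)
>>> lengths = torch.tensor([16000, 12000])
>>> features, feat_lengths = model.extract_features(waveforms, lengths)
>>> logits, out_lengths = model(waveforms, lengths)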
+ +
+
+

Factory Functions

+
+
+

wav2vec2_base

+
+
+torchaudio.models.wav2vec2_base(num_out: int) → torchaudio.models.wav2vec2.model.Wav2Vec2Model[source]
+

Build wav2vec2.0 model with “Base” configuration from wav2vec 2.0 [4].

+
+
Parameters
+

num_out (int) – The number of output labels.

+
+
Returns
+

The resulting model.

+
+
Return type
+

Wav2Vec2Model

+
+
+
+
Example - Reload fine-tuned model from Hugging Face:
>>> # Session 1 - Convert pretrained model from Hugging Face and save the parameters.
+>>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
+>>>
+>>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
+>>> model = import_huggingface_model(original)
+>>> torch.save(model.state_dict(), "wav2vec2-base-960h.pt")
+>>>
+>>> # Session 2 - Load model and the parameters
+>>> model = wav2vec2_base(num_out=32)
+>>> model.load_state_dict(torch.load("wav2vec2-base-960h.pt"))
+
+
+
+
+
+ +
+
+

wav2vec2_large

+
+
+torchaudio.models.wav2vec2_large(num_out: int) → torchaudio.models.wav2vec2.model.Wav2Vec2Model[source]
+

Build wav2vec2.0 model with “Large” configuration from wav2vec 2.0 [4].

+
+
Parameters
+

num_out (int) – The number of output labels.

+
+
Returns
+

The resulting model.

+
+
Return type
+

Wav2Vec2Model

+
+
+
+
Example - Reload fine-tuned model from Hugging Face:
>>> # Session 1 - Convert pretrained model from Hugging Face and save the parameters.
+>>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
+>>>
+>>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h")
+>>> model = import_huggingface_model(original)
+>>> torch.save(model.state_dict(), "wav2vec2-large-960h.pt")
+>>>
+>>> # Session 2 - Load model and the parameters
+>>> model = wav2vec2_large(num_out=32)
+>>> model.load_state_dict(torch.load("wav2vec2-large-960h.pt"))
+
+
+
+
+
+ +
+
+

wav2vec2_large_lv60k

+
+
+torchaudio.models.wav2vec2_large_lv60k(num_out: int) → torchaudio.models.wav2vec2.model.Wav2Vec2Model[source]
+

Build wav2vec2.0 model with “Large LV-60k” configuration from wav2vec 2.0 [4].

+
+
Parameters
+

num_out (int) – The number of output labels.

+
+
Returns
+

The resulting model.

+
+
Return type
+

Wav2Vec2Model

+
+
+
+
Example - Reload fine-tuned model from Hugging Face:
>>> # Session 1 - Convert pretrained model from Hugging Face and save the parameters.
+>>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
+>>>
+>>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
+>>> model = import_huggingface_model(original)
+>>> torch.save(model.state_dict(), "wav2vec2-large-960h-lv60-self.pt")
+>>>
+>>> # Session 2 - Load model and the parameters
+>>> model = wav2vec2_large_lv60k(num_out=32)
+>>> model.load_state_dict(torch.load("wav2vec2-large-960h-lv60-self.pt"))
+
+
+
+
+
+ +
+
+

Utility Functions

+
+
+

import_huggingface_model

+
+
+torchaudio.models.wav2vec2.utils.import_huggingface_model(original: torch.nn.modules.module.Module) → torchaudio.models.wav2vec2.model.Wav2Vec2Model[source]
+

Import wav2vec2 model from Hugging Face’s Transformers.

+
+
Parameters
+

original (torch.nn.Module) – An instance of Wav2Vec2ForCTC from transformers.

+
+
Returns
+

Imported model.

+
+
Return type
+

Wav2Vec2Model

+
+
+
+
Example
>>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
+>>>
+>>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
+>>> model = import_huggingface_model(original)
+>>>
+>>> waveforms, _ = torchaudio.load("audio.wav")
+>>> logits, _ = model(waveforms)
+
+
+
+
+
+ +
+
+

import_fairseq_model

+
+
+torchaudio.models.wav2vec2.utils.import_fairseq_model(original: torch.nn.modules.module.Module, num_out: Optional[int] = None) → torchaudio.models.wav2vec2.model.Wav2Vec2Model[source]
+

Build Wav2Vec2Model from pretrained parameters published by fairseq.

+
+
Parameters
+
    +
  • original (torch.nn.Module) – An instance of fairseq’s Wav2Vec2.0 model class. +Either fairseq.models.wav2vec.wav2vec2_asr.Wav2VecEncoder or +fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model.

  • +
  • num_out (int, optional) – The number of output labels. Required only when the original model is +an instance of fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model.

  • +
+
+
Returns
+

Imported model.

+
+
Return type
+

Wav2Vec2Model

+
+
+
+
Example - Loading pretrain-only model
>>> from torchaudio.models.wav2vec2.utils import import_fairseq_model
+>>>
+>>> # Load model using fairseq
+>>> model_file = 'wav2vec_small.pt'
+>>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
+>>> original = model[0]
+>>> imported = import_fairseq_model(original, num_out=28)
+>>>
+>>> # Perform feature extraction
+>>> waveform, _ = torchaudio.load('audio.wav')
+>>> features, _ = imported.extract_features(waveform)
+>>>
+>>> # Compare result with the original model from fairseq
+>>> reference = original.feature_extractor(waveform).transpose(1, 2)
+>>> torch.testing.assert_allclose(features, reference)
+
+
+
+
Example - Fine-tuned model
>>> from torchaudio.models.wav2vec2.utils import import_fairseq_model
+>>>
+>>> # Load model using fairseq
+>>> model_file = 'wav2vec_small_960h.pt'
+>>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
+>>> original = model[0]
+>>> imported = import_fairseq_model(original.w2v_encoder)
+>>>
+>>> # Perform encoding
+>>> waveform, _ = torchaudio.load('audio.wav')
+>>> emission, _ = imported(waveform)
+>>>
+>>> # Compare result with the original model from fairseq
+>>> mask = torch.zeros_like(waveform)
+>>> reference = original(waveform, mask)['encoder_out'].transpose(0, 1)
+>>> torch.testing.assert_allclose(emission, reference)
+
+
+
+
+
+ +
+
+
+

WaveRNN

+
+
+class torchaudio.models.WaveRNN(upsample_scales: List[int], n_classes: int, hop_length: int, n_res_block: int = 10, n_rnn: int = 512, n_fc: int = 512, kernel_size: int = 5, n_freq: int = 128, n_hidden: int = 128, n_output: int = 128)[source]
+

WaveRNN model based on the implementation from fatchord.

+

The original implementation was introduced in Efficient Neural Audio Synthesis +[5]. The input channels of waveform and spectrogram have to be 1. +The product of upsample_scales must equal hop_length.

+
+
Parameters
+
    +
  • upsample_scales – the list of upsample scales.

  • +
  • n_classes – the number of output classes.

  • +
  • hop_length – the number of samples between the starts of consecutive frames.

  • +
  • n_res_block – the number of ResBlock in stack. (Default: 10)

  • +
  • n_rnn – the dimension of RNN layer. (Default: 512)

  • +
  • n_fc – the dimension of fully connected layer. (Default: 512)

  • +
  • kernel_size – the size of the kernel in the first Conv1d layer. (Default: 5)

  • +
  • n_freq – the number of bins in a spectrogram. (Default: 128)

  • +
  • n_hidden – the number of hidden dimensions of resblock. (Default: 128)

  • +
  • n_output – the number of output dimensions of melresnet. (Default: 128)

  • +
+
+
+
+
Example
>>> wavernn = WaveRNN(upsample_scales=[5,5,8], n_classes=512, hop_length=200)
+>>> waveform, sample_rate = torchaudio.load(file)
+>>> # waveform shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length)
+>>> specgram = MelSpectrogram(sample_rate)(waveform)  # shape: (n_batch, n_channel, n_freq, n_time)
+>>> output = wavernn(waveform, specgram)
+>>> # output shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length, n_classes)
+
+
+
+
+
+
+forward(waveform: torch.Tensor, specgram: torch.Tensor) → torch.Tensor[source]
+

Pass the input through the WaveRNN model.

+
+
Parameters
+
    +
  • waveform – the input waveform to the WaveRNN layer (n_batch, 1, (n_time - kernel_size + 1) * hop_length)

  • +
  • specgram – the input spectrogram to the WaveRNN layer (n_batch, 1, n_freq, n_time)

  • +
+
+
Returns
+

Output tensor of shape (n_batch, 1, (n_time - kernel_size + 1) * hop_length, n_classes)

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

References

+

+
1
+

Yi Luo and Nima Mesgarani. Conv-tasnet: surpassing ideal time–frequency magnitude masking for speech separation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 27(8):1256–1266, Aug 2019. URL: http://dx.doi.org/10.1109/TASLP.2019.2915167, doi:10.1109/taslp.2019.2915167.

+
+
2
+

Awni Hannun, Carl Case, Jared Casper, Bryan Catanzaro, Greg Diamos, Erich Elsen, Ryan Prenger, Sanjeev Satheesh, Shubho Sengupta, Adam Coates, and Andrew Y. Ng. Deep speech: scaling up end-to-end speech recognition. 2014. arXiv:1412.5567.

+
+
3
+

Ronan Collobert, Christian Puhrsch, and Gabriel Synnaeve. Wav2letter: an end-to-end convnet-based speech recognition system. 2016. arXiv:1609.03193.

+
+
4(1,2,3,4)
+

Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli. Wav2vec 2.0: a framework for self-supervised learning of speech representations. 2020. arXiv:2006.11477.

+
+
5
+

Nal Kalchbrenner, Erich Elsen, Karen Simonyan, Seb Noury, Norman Casagrande, Edward Lockhart, Florian Stimberg, Aaron van den Oord, Sander Dieleman, and Koray Kavukcuoglu. Efficient neural audio synthesis. 2018. arXiv:1802.08435.

+
+
+

+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/objects.inv b/0.9.0/objects.inv new file mode 100644 index 0000000000..edce73f79b Binary files /dev/null and b/0.9.0/objects.inv differ diff --git a/0.9.0/py-modindex.html b/0.9.0/py-modindex.html new file mode 100644 index 0000000000..c2c4900a5e --- /dev/null +++ b/0.9.0/py-modindex.html @@ -0,0 +1,646 @@ + + + + + + + + + + + + Python Module Index — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+ + +
+
+ + + + +
+ + + +
+

+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/search.html b/0.9.0/search.html new file mode 100644 index 0000000000..fd564d29fd --- /dev/null +++ b/0.9.0/search.html @@ -0,0 +1,639 @@ + + + + + + + + + + + + Search — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ + + + +
+ +
+ +
+ +
+
+ + + + +
+ + + +
+

+ + +
+ +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/searchindex.js b/0.9.0/searchindex.js new file mode 100644 index 0000000000..334ee846a6 --- /dev/null +++ b/0.9.0/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({docnames:["backend","compliance.kaldi","datasets","functional","index","kaldi_io","models","sox_effects","torchaudio","transforms","utils"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.index":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":2,"sphinx.ext.viewcode":1,"sphinxcontrib.bibtex":7,sphinx:56},filenames:["backend.rst","compliance.kaldi.rst","datasets.rst","functional.rst","index.rst","kaldi_io.rst","models.rst","sox_effects.rst","torchaudio.rst","transforms.rst","utils.rst"],objects:{"torchaudio.backend.common":{AudioMetaData:[0,0,1,""]},"torchaudio.backend.soundfile_backend":{info:[0,1,1,""],load:[0,1,1,""],save:[0,1,1,""]},"torchaudio.backend.sox_io_backend":{info:[0,1,1,""],load:[0,1,1,""],save:[0,1,1,""]},"torchaudio.compliance.kaldi":{fbank:[1,1,1,""],mfcc:[1,1,1,""],resample_waveform:[1,1,1,""],spectrogram:[1,1,1,""]},"torchaudio.datasets":{CMUARCTIC:[2,0,1,""],COMMONVOICE:[2,0,1,""],GTZAN:[2,0,1,""],LIBRISPEECH:[2,0,1,""],LIBRITTS:[2,0,1,""],LJSPEECH:[2,0,1,""],SPEECHCOMMANDS:[2,0,1,""],TEDLIUM:[2,0,1,""],VCTK:[2,0,1,""],VCTK_092:[2,0,1,""],YESNO:[2,0,1,""]},"torchaudio.datasets.CMUARCTIC":{__getitem__:[2,2,1,""]},"torchaudio.datasets.COMMONVOICE":{__getitem__:[2,2,1,""]},"torchaudio.datasets.GTZAN":{__getitem__:[2,2,1,""]},"torchaudio.datasets.LIBRISPEECH":{__getitem__:[2,2,1,""]},"torchaudio.datasets.LIBRITTS":{__getitem__:[2,2,1,""]},"torchaudio.datasets.LJSPEECH":{__getitem__:[2,2,1,""]},"torchaudio.datasets.SPEECHCOMMANDS":{__getitem__:[2,2,1,""]},"torchaudio.datasets.TEDLIUM":{__getitem__:[2,2,1,""],phoneme_dict:[2,2,1,""]},"torchaudio.datasets.VCTK":{__getitem__:[2,2,1,""]},"torchaudio.datasets.VCTK_092":{__getitem__:[2,2,1,""]},"torchaudio.datasets.YESNO":{__getitem__:[2,2,1,""]},"torchaudio.functional":{DB_to_amplitude:[3,1,1,""],allpass_biquad:[3,1,1,""],amplitude_to_DB:[3,1,1,""],angle:[3,1,1,""],apply_codec:[3,1,1,""],band_biquad:[3,1,1,""],bandpass_biquad:[3,1,1,""],bandreject_biquad:[3,1,1,""],bass_biquad:[3,1,1,""],biquad:[3,1,1,""],complex_norm:[3,1,1,""],compute_deltas:[3,1,1,""],compute_kaldi_pitch:[3,1,1,""],contrast:[3,1,1,""],create_dct:[3,1,1,""],create_fb_matrix:[3,1,1,""],dcshift:[3,1,1,""],deemph_biquad:[3,1,1,""],detect_pitch_frequency:[3,1,1,""],dither:[3,1,1,""],equalizer_biquad:[3,1,1,""],flanger:[3,1,1,""],gain:[3,1,1,""],griffinlim:[3,1,1,""],highpass_biquad:[3,1,1,""],lfilter:[3,1,1,""],lowpass_biquad:[3,1,1,""],magphase:[3,1,1,""],mask_along_axis:[3,1,1,""],mask_along_axis_iid:[3,1,1,""],mu_law_decoding:[3,1,1,""],mu_law_encoding:[3,1,1,""],overdrive:[3,1,1,""],phase_vocoder:[3,1,1,""],phaser:[3,1,1,""],resample:[3,1,1,""],riaa_biquad:[3,1,1,""],sliding_window_cmn:[3,1,1,""],spectral_centroid:[3,1,1,""],spectrogram:[3,1,1,""],treble_biquad:[3,1,1,""],vad:[3,1,1,""]},"torchaudio.kaldi_io":{read_mat_ark:[5,1,1,""],read_mat_scp:[5,1,1,""],read_vec_flt_ark:[5,1,1,""],read_vec_flt_scp:[5,1,1,""],read_vec_int_ark:[5,1,1,""]},"torchaudio.models":{ConvTasNet:[6,0,1,""],DeepSpeech:[6,0,1,""],Wav2Letter:[6,0,1,""],Wav2Vec2Model:[6,0,1,""],WaveRNN:[6,0,1,""],wav2vec2_base:[6,1,1,""],wav2vec2_large:[6,1,1,""],wav2vec2_large_lv60k:[
6,1,1,""]},"torchaudio.models.ConvTasNet":{forward:[6,2,1,""]},"torchaudio.models.DeepSpeech":{forward:[6,2,1,""]},"torchaudio.models.Wav2Letter":{forward:[6,2,1,""]},"torchaudio.models.Wav2Vec2Model":{extract_features:[6,2,1,""],forward:[6,2,1,""]},"torchaudio.models.WaveRNN":{forward:[6,2,1,""]},"torchaudio.models.wav2vec2.utils":{import_fairseq_model:[6,1,1,""],import_huggingface_model:[6,1,1,""]},"torchaudio.sox_effects":{apply_effects_file:[7,1,1,""],apply_effects_tensor:[7,1,1,""],effect_names:[7,1,1,""],init_sox_effects:[7,1,1,""],shutdown_sox_effects:[7,1,1,""]},"torchaudio.transforms":{AmplitudeToDB:[9,0,1,""],ComplexNorm:[9,0,1,""],ComputeDeltas:[9,0,1,""],Fade:[9,0,1,""],FrequencyMasking:[9,0,1,""],GriffinLim:[9,0,1,""],InverseMelScale:[9,0,1,""],MFCC:[9,0,1,""],MelScale:[9,0,1,""],MelSpectrogram:[9,0,1,""],MuLawDecoding:[9,0,1,""],MuLawEncoding:[9,0,1,""],Resample:[9,0,1,""],SlidingWindowCmn:[9,0,1,""],SpectralCentroid:[9,0,1,""],Spectrogram:[9,0,1,""],TimeMasking:[9,0,1,""],TimeStretch:[9,0,1,""],Vad:[9,0,1,""],Vol:[9,0,1,""]},"torchaudio.transforms.AmplitudeToDB":{forward:[9,2,1,""]},"torchaudio.transforms.ComplexNorm":{forward:[9,2,1,""]},"torchaudio.transforms.ComputeDeltas":{forward:[9,2,1,""]},"torchaudio.transforms.Fade":{forward:[9,2,1,""]},"torchaudio.transforms.FrequencyMasking":{forward:[9,2,1,""]},"torchaudio.transforms.GriffinLim":{forward:[9,2,1,""]},"torchaudio.transforms.InverseMelScale":{forward:[9,2,1,""]},"torchaudio.transforms.MFCC":{forward:[9,2,1,""]},"torchaudio.transforms.MelScale":{forward:[9,2,1,""]},"torchaudio.transforms.MelSpectrogram":{forward:[9,2,1,""]},"torchaudio.transforms.MuLawDecoding":{forward:[9,2,1,""]},"torchaudio.transforms.MuLawEncoding":{forward:[9,2,1,""]},"torchaudio.transforms.Resample":{forward:[9,2,1,""]},"torchaudio.transforms.SlidingWindowCmn":{forward:[9,2,1,""]},"torchaudio.transforms.SpectralCentroid":{forward:[9,2,1,""]},"torchaudio.transforms.Spectrogram":{forward:[9,2,1,""]},"torchaudio.transforms.TimeMasking":{forward:[9,2,1,""]},"torchaudio.transforms.TimeStretch":{forward:[9,2,1,""]},"torchaudio.transforms.Vad":{forward:[9,2,1,""]},"torchaudio.transforms.Vol":{forward:[9,2,1,""]},"torchaudio.utils":{sox_utils:[10,3,0,"-"]},"torchaudio.utils.sox_utils":{list_effects:[10,1,1,""],list_read_formats:[10,1,1,""],list_write_formats:[10,1,1,""],set_buffer_size:[10,1,1,""],set_seed:[10,1,1,""],set_use_threads:[10,1,1,""],set_verbosity:[10,1,1,""]},torchaudio:{get_audio_backend:[8,1,1,""],info:[8,1,1,""],list_audio_backends:[8,1,1,""],load:[8,1,1,""],save:[8,1,1,""],set_audio_backend:[8,1,1,""]}},objnames:{"0":["py","class","Python class"],"1":["py","function","Python function"],"2":["py","method","Python method"],"3":["py","module","Python 
module"]},objtypes:{"0":"py:class","1":"py:function","2":"py:method","3":"py:module"},terms:{"0431e":7,"0650e":7,"0db":[3,7],"112kbp":0,"1151e":7,"128khz":0,"14th":[3,9],"179d6e9a88202ab0a2f":9,"1860e":7,"1e9":3,"2188e":7,"2ch":7,"4122e":7,"4761e":7,"60k":6,"6159e":7,"6924e":7,"7b98e3e":[3,9],"8073e":7,"8103e":7,"8114e":7,"960h":6,"break":4,"byte":[0,7,10],"case":[0,6],"class":[0,2,6,7,9],"default":[0,1,2,3,6,9],"final":3,"float":[0,1,3,6,9],"function":[0,4,7,9,10],"import":[6,9],"int":[0,1,2,3,5,6,7,8,9,10],"long":[4,7],"nathana\u00ebl":[3,9],"new":[0,1,3,7],"return":[0,1,2,3,5,6,7,8,9,10],"s\u00f8ndergaard":[3,9],"short":[3,9],"super":7,"switch":[0,8],"true":[0,1,2,3,7,9,10],CMS:1,For:[0,2,3,4,7,9],Not:[0,2],One:[2,3,8,9],The:[0,1,2,3,4,5,6,7,8,9],There:[0,8],These:4,Use:[3,7],Used:[0,3],Uses:0,Using:3,With:3,__getitem__:[2,7],__init__:7,__len__:[2,7],_get_strid:1,_length:[3,9],a_coeff:3,aaron:6,abdelrahman:6,abl:7,about:2,abov:[0,3,9],absolut:1,accent:2,accept:[0,7],accord:[5,9],accur:0,acm:6,acoust:[3,9],across:[4,7],activ:[3,9],actual:7,adam:6,adapt:0,add:[1,3,7,9],adding:3,address:6,adjust:3,advanc:3,aew:2,affect:[3,10],affel:[3,9],after:[3,7,9],again:7,age:2,ago:3,ahead:4,ahw:2,alaw:0,alexei:6,algorithm:[3,9],alias:[1,3,9],all:[0,2,3,7],allow:[0,2,3,7,9],allowed_gap:[3,9],allpass:7,almost:2,along:[3,9],also:[0,1,2,3,4,7,9],alter:7,altern:[3,9],although:4,alwai:[0,7],amb:0,ames:[3,9],amin:3,amount:[3,9],amplif:3,amplifi:3,amplitud:[3,9],amplitude_to_db:9,amplitudetodb:4,amr:0,amr_nb:0,amr_wb:0,andrew:6,ani:[3,5,9],aniel:[3,9],annot:[0,7],anoth:[0,9],anti:[1,3,9],apf:3,api:[2,4],appli:[1,3,4,9],applic:[1,3,9],apply_effects_fil:7,apply_effects_tensor:7,approx:0,architectur:6,arctic:2,area:[3,9],arg:2,argument:[0,2,3,6,7,9],ark:5,around:[1,5],arrai:[3,9],arxiv:6,assert:7,assert_allclos:6,assign:[3,8,9],assum:[3,9],athryn:[3,9],att:[3,9],attempt:[3,9],attenberg:[3,9],attenu:[3,7],audio:[0,1,2,3,4,6,7,8,9,10],audio_ext:2,aug:6,augment:3,auli:6,aup:2,automat:[3,7],avail:[2,4,7,8,10],averag:[3,9],avoid:[3,9],awb:2,awen:[3,9],awni:6,axb:2,axi:[3,9],b_coeff:3,babaali:3,back:[3,9],backend:[3,4,10],background:[3,9],backward:4,baevski:6,bagher:3,balaz:[3,9],band:[3,7,9],bandlimit:3,bandpass:7,bank:[3,9],base:[0,3,4,6,7,8,9],basic:7,bass:3,batch:[3,6,7,9],batch_siz:[2,6,7],bdl:2,becaus:[2,4,7,9],been:[3,9],befor:[0,1,3,9],behind:4,being:[3,9],beta:[3,4,9],between:[3,6,9],bin:[1,3,6,9],binari:[0,4],bit:[0,3,7],bitrat:0,bits_per_sampl:[0,3],bitstream:2,blackman:1,blackman_coeff:1,block:6,bool:[0,1,2,3,7,9,10],boost:3,boot_tim:[3,9],both:[0,3,9],bryan:6,buffer:10,buffer_s:10,build:6,built:[2,9],burst:[3,9],bz2:2,c_t:3,cach:9,cahng:[3,9],calcul:[1,9],call:[0,7],callabl:9,can:[0,1,2,3,4,6,7,8,9],cannot:0,carl:6,carri:9,casagrand:6,casper:6,catanzaro:6,caus:3,causal:6,caution:1,ccrma:3,cdoubl:[3,9],ceil:[3,6,9],cent:7,center:[3,9],center_freq:3,central:3,central_freq:3,centroid:[3,9],cepstra:1,cepstral:[3,9],cepstral_lift:1,cepstrum:9,certain:[0,7],cfloat64:3,cfloat:[3,9],chain:[3,7,9,10],chang:[0,1,3,4,7,9],channel:[0,1,3,6,7,9,10],channels_first:[0,3,7],chapter_id:2,characterist:[3,9],check:7,checkpoint_util:6,christian:6,cienc:[3,9],clamp:3,classif:4,classifi:[4,6],clb:2,clean:[2,7],client_id:2,clip:[3,9],cmn:[3,9],cmn_window:[3,9],cmu_arct:2,cmuarctic:4,coat:6,codec:[0,3],coeffcient:3,coeffici:[1,3,9],collobert:6,colour:3,column:[3,9],com:[2,9],command:[0,2,7],commit:4,commnad:7,common:[2,3,4,6,9],commonvoic:4,compand:[3,9],compar:[3,6],compat:[0,1,3,4,7],compatibl:0,compil:[0,7,10],complet:[1,4],co
mplex:[4,9],complex_specgram:[3,9],complex_tensor:[3,9],complexnorm:4,complianc:4,compon:1,composit:9,compress:[0,3],comput:[1,3,6,9],compute_delta:9,computedelta:4,conda:4,condit:3,confer:[3,9],configur:[0,6,10],connect:6,consecut:6,consist:[0,4,9],const_skirt_gain:3,constant:[1,3,9],construct:2,contain:6,content:3,continu:3,control:[1,3,9],conv1d:6,conv:6,converg:[3,9],convers:[3,9],convert:[0,2,3,6,9],convnet:6,convolut:6,convtasnet:4,cookbook:3,corpu:2,correct:0,correl:3,correspond:[0,2,3,6,7],cosin:9,cost:3,could:[3,9],coverag:4,cpu:[0,7],creat:[1,2,3,5,7,9],critic:3,cross:3,current:[0,3,8,9],custom:2,cut:[3,9],cutoff:[1,3],cutoff_freq:3,d_t:3,daniel:3,data:[1,2,3,4,7,8],data_load:2,dataload:[2,7],dataset:[4,7],datashar:2,db_multipli:3,dct:[3,9],dct_type:9,debug:10,decai:3,decibel:[3,9],decod:[3,6,9],decreas:[3,9],deep:6,deepspeech:4,def:7,defin:[2,3,7,9],definit:6,delai:3,delay_m:3,delta2:3,delta:[3,9],delta_pitch:3,den:6,denomin:3,densiti:3,density_funct:3,depend:[0,1,3,5,9],depth:[0,3],describ:[3,4],descriptor:5,design:3,desir:[1,3,7,9],detail:[0,2,3,8,9,10],detect:[0,3,7,9],detector:[3,9],determin:[0,3],determnin:9,dev:2,devic:[4,9],diamo:6,dict:[2,9,10],dictionari:[2,5],dieleman:6,differ:[0,3,7,8,9],digital_biquad_filt:3,dimens:[0,1,3,6,7,9],dimension:3,diment:6,directli:0,directori:2,discret:9,distort:3,distribut:[0,4,6],dither:1,divid:[3,9],doc:9,document:[0,4],doe:[0,7,9],doesn:0,doi:[3,6,9],domain:[6,9],doubl:3,dowload:2,down:[3,9],down_vot:2,download:2,downsampl:2,driven:3,dropout:6,dtype:[0,3,7,9],due:[0,2],dump:7,durat:[1,3,7,9],dynam:3,each:[1,3,6,9],earli:4,earliest:[3,9],eas:8,edg:3,editor:[3,9],edu:3,edward:6,eei:2,effect:[0,1,3,4,9,10],effect_nam:7,effici:[1,3,6,9],either:[0,2,3,6,9],element:1,elementwis:9,elimin:3,els:[0,1,3],elsen:6,emiss:6,emphasi:3,empti:2,enabl:10,enc_kernel_s:6,enc_num_feat:6,encod:[0,3,6,9],encoder_out:6,end:[1,3,6,7,9],energi:[1,3,9],energy_floor:1,enhanc:3,enhancement_amount:3,enough:0,entir:2,entri:[3,9],epsilon:1,equal:[3,6,9],equat:3,equival:3,ergstra:[3,9],erich:6,error:[2,3,7],especi:[3,9],estim:[3,9],etc:[0,3,7,9],euclidian:9,evalu:3,even:[0,3],event:[3,9],exampl:[2,3,5,6,7,9],exce:3,except:4,expect:[1,3,4,9],explain:[2,3],expon:[3,9],exponenti:9,express:0,extens:[0,2,7],extent:[3,9],extra:[1,3,9],extract:[1,4,6],extract_featur:6,extractor:6,f_max:[3,9],f_min:[3,9],face:6,facebook:6,factor:[0,1,3],fade:4,fade_in_len:9,fade_out_len:9,fade_shap:9,failur:10,fairseq:6,fals:[0,1,2,3,7,9],fast:[3,9],faster:[3,9],fatchord:6,feat:[1,3],featur:[1,4,6],feature_extractor:6,feedback:[3,4],fem:2,fetch:8,fft:[1,3,9],field:3,file:[0,1,2,3,5,6,8],file_list:7,file_or_fd:5,filedescriptor:5,filepath:[0,8],filter:[1,4,7,9],filterbank:[1,3,9],fine:6,first:[3,6,7,9],fit:[0,1],fix:[0,1],fixed_r:9,flac:[0,2],flag:4,flist:7,float32:[0,5,7,9],float64:[5,9],floor:1,florian:6,folder_in_arch:2,follow:[0,2,7,8],fool:[3,9],form:[3,9],format:[0,2,3,7,8,10],forward:[3,6,7,9],found:[2,3,9],four:0,fourier:[3,9],frac:[3,6],fraction:[1,3,9],frame:[0,1,3,6,9],frame_length:[1,3],frame_offset:0,frame_shift:[1,3],frame_tim:3,frames_per_chunk:3,framework:[4,6],free:0,freq:[1,3,9],freq_high:3,freq_low:3,freq_mask_param:9,frequenc:[1,3,6,9],frequencymask:4,from:[0,1,2,3,5,6,7,9,10],from_pretrain:6,front:[3,9],full:[3,9],fulli:6,gabriel:6,gain:[7,9],gain_db:3,gain_in:3,gain_out:3,gain_typ:9,gap:[3,4,9],gaussian:3,gender:2,gener:[1,4,5,6,7,9],genr:2,get:[0,1,7,8],get_audio_backend:8,ghahremani:3,gist:9,github:9,give:[0,2,3,7,9],given:[0,1,3,4,6,7,9],gka:2,gpdf:3,greg:6,griffin:[3,9]
,griffinlim:4,gsm:0,gtzan:4,gzip:5,half:[3,9],half_sin:9,ham:1,han:1,handl:[0,1,2],hann_window:9,hannun:6,happen:4,hardwar:3,harmon:3,has:[0,3,7,9,10],have:[2,3,6],haythamfayek:9,header:[0,7],help:[0,3,7,9],henc:2,henri:6,here:[0,2,9],hidden:6,high:[0,1,9],high_freq:1,higher:9,highest:[0,1,3,9],highlight:3,highpass:3,hint:0,hop:[3,9],hop_length:[3,6,9],howev:[0,4,7],hp_filter_freq:[3,9],hp_lifter_freq:[3,9],htk:[0,1,3,9],htk_compat:1,html:[0,2,3,9,10],http:[0,2,3,6,9,10],hug:6,iang:[3,9],icar:[3,9],icassp:[3,9],ideal:6,ident:1,identifi:2,ieee:[3,6,9],ieto:[3,9],ignal:[3,9],ignor:[2,3,9],iid_mask:9,iii:0,iir:3,imaginari:[3,9],implement:[0,2,3,6,8,9],improv:4,includ:[0,1,3,9],incomplet:3,increas:[3,9,10],index:[2,7],indic:[3,6,7,9],individu:1,infer:[0,7],inflect:1,info:[2,3,8,9],inform:[0,2],init_sox_effect:7,initi:[3,4,9],input:[0,1,2,3,6,7,9],input_length:6,input_sample_r:7,input_typ:6,instal:[0,5],instanc:6,instead:[0,3,9],instrument:3,int16:0,int24:0,int32:[0,10],integ:[0,3,7],intens:0,intention:[0,7],interfac:0,intern:[3,6,7,9],interpol:3,interpret:[0,9],interv:3,introduc:[3,6],introduct:2,invalid:2,invers:9,inversemelscal:4,iso:3,iter:[3,5,9],its:[2,3],jae:[3,9],jan:3,jare:6,jit:[0,7],jmk:2,jos:3,kaiser:[3,9],kaiser_window:[1,3,9],kalchbrenn:6,kaldi:[3,4,5],kaldi_io:4,karen:6,kastnerkyl:9,kavukcuoglu:6,kbit:0,kbp:0,kei:[2,5],keithito:2,kernel:[3,6,9],kernel_s:6,khudanpur:3,korai:6,korbinian:3,ksp:2,label:[2,6],lack:2,languag:6,larg:6,last:[1,3],latenc:[3,9],latest:9,law:[0,3,9],layer:[0,6],lead:[3,9],learn:[4,6,9],least:7,leav:7,left:[1,3,9],lehman:3,len:7,length:[0,1,3,6,7,9],less:[0,1,3,9],level:[0,2,3,9,10],libmad:0,libmp3lam:0,librari:[0,4],librispeech:4,libritt:4,librosa:[3,9],libsox:[0,7,10],light:5,like:[0,4,7],likelihood:6,lim:[3,9],limit:4,limiter_gain:3,line:3,linear:[0,1,3,9],link:0,linspac:3,linux:0,list:[2,4,6,8,10],list_audio_backend:8,list_effect:10,list_read_format:10,list_write_format:10,ljm:2,ljspeech:4,lli:[3,9],lnh:2,load:[2,6,7,8,9],load_model_ensemble_and_task:6,load_state_dict:6,loader:7,locat:2,lockhart:6,log10:3,log:[1,6,9],log_mel:9,logarithm:9,logit:6,longer:[2,7],loss:9,lossi:0,lossless:0,louder:3,low:1,low_freq:1,lower:[1,3,9],lowest:[0,3],lowpass:[3,7],lowpass_cutoff:3,lowpass_filter_width:[1,3,9],lp_filter_freq:[3,9],lp_lifter_freq:[3,9],luo:6,lv60:6,machin:[4,9],maco:0,made:8,magnitud:[1,3,6,9],mai:[0,3,4,9],maintain:4,major:4,majora:[3,9],make:3,manual:7,map:[2,10],marsya:2,mask:[3,6,9],mask_param:3,mask_valu:[3,9],match:[1,3],math:3,matric:4,matrix:[3,5,9],max:3,max_f0:3,max_frames_lat:3,max_it:9,max_v:3,maximum:[0,3,9],mean:[1,2,3,9],measur:[3,9],measure_dur:[3,9],measure_freq:[3,9],measure_smooth_tim:[3,9],median:3,mel:[1,3,9],mel_scal:[3,9],mel_specgram:9,melkwarg:9,melresnet:6,melscal:4,melspec:9,melspectrogram:[4,6],merg:7,mesgarani:6,messag:10,meta:[0,8],metadata:[0,2],method:[0,1,2,3,7,9],mfc:[3,9],mfcc:[4,6],mic1:2,mic2:2,mic_id:2,michael:6,microphon:2,might:[0,7],millisecond:[1,3],mimick:[3,9],min:3,min_cmn_window:[3,9],min_dur:1,min_f0:3,minim:[3,9],minimum:[1,3,9],miss:0,mod_spe:3,mode:[3,9],model:4,model_fil:6,modifi:[3,9],modul:[0,3,5,6,7,8,10],modulo:[3,9],moham:6,momentum:[3,9],mono:1,more:[1,2,3,9],most:[0,7],mp3:0,mpeg:0,msk_kernel_s:6,msk_num_feat:6,msk_num_hidden_feat:6,msk_num_lay:6,msk_num_stack:6,much:3,mulawdecod:4,mulawencod:4,multi:[0,3,9],multipl:[2,3,7],multipli:[3,9],multiprocess:2,multithread:10,music:[3,9],must:[0,1,3,6,9],mutlithread:10,n_batch:6,n_channel:6,n_class:6,n_fc:6,n_featur:6,n_fft:[3,9],n_frame:[3,9],n_freq:[3,6
,9],n_hidden:6,n_iter:[3,9],n_mel:[3,9],n_mfcc:[3,9],n_order:3,n_output:6,n_res_block:6,n_rnn:6,n_stft:9,n_time:6,nal:6,nalysi:[3,9],name:[2,7,8,10],nativ:[0,3,9],nccf:3,nccf_ballast:3,necessari:3,need:[1,3,4,5,7,9],neg:[1,3,6,9],net:[0,3,9,10],network:6,neural:6,new_freq:[1,3,9],nima:6,nois:[3,9],noise_down_tim:[3,9],noise_reduction_amount:[3,9],noise_shap:3,noise_up_tim:[3,9],non:[2,3,6],none:[0,2,3,6,7,8,9],nonlinear:3,norm:[3,9],norm_var:[3,9],normal:[0,1,3,7,9],normalis:7,normalized_text:2,normalized_transcript:2,norman:6,note:[0,2,3,7],notic:4,nouri:6,now:7,nthread:2,num:6,num_cep:1,num_channel:0,num_class:6,num_featur:6,num_fram:[0,3,9],num_mel_bin:1,num_out:6,num_sampl:0,num_sourc:6,num_work:2,number:[0,1,3,6,9],number_of_class:6,numbr:6,numer:[3,9],numeric_limit:1,nyquist:[1,3,9],object:[0,7,8,9],off:[0,1,3,9],offset:[1,3],ogg:0,olin:[3,9],onc:[7,9],one:[0,3,4,6,7,9],onesid:[3,9],onfer:[3,9],onli:[0,1,3,6,7,9],onlin:3,oord:6,open:[4,5],openmp:10,openslr:2,oper:[1,3,4,8],opihi:2,optim:9,option:[0,1,2,3,6,7,8,9,10],opu:0,order:[3,7,9],org:[2,3,6,9],orient:3,orig_freq:[1,3,9],origin:[1,2,3,6,9],original_text:2,ortho:[3,9],other:[0,1,2,3,7,9],otherwis:[0,3,7,9],our:3,out:9,output:[1,3,6,9],over:[3,6],overlap:[3,9],overrid:[0,7],overriding_r:9,overview:4,p280:2,p315:2,p362:2,packag:4,pad:[1,3,6,7,9],pad_mod:[3,9],padded_window_s:1,paper:[2,6],parallel:10,parallelli:2,paramet:[0,1,2,3,5,6,7,8,9,10],part:[3,4,9],particular:3,pass:[2,3,6,7,9],path:[0,2,7],pathlib:[0,2],pcm:0,pcm_:0,pcm_f:0,pcm_u:0,peak:3,pegah:3,penalty_factor:3,per:[0,3,9],perceiv:3,percentag:3,percuss:3,perform:[1,3,4,6],perhap:3,period:[3,9],perraudin:[3,9],perturb:7,peter:[3,9],phase:[3,9],phase_adv:3,phonem:2,phoneme_dict:2,pick:0,piecewis:1,pipe:5,pitch:[3,7,9],plan:2,playback:3,pleas:[0,2,6,9],point:[0,1,3,9],pointer:3,pole:[3,7],popular:4,port:[3,9],posit:9,possibl:[3,9],povei:[1,3],power:[1,3,9],power_spectrum:6,pre:9,pre_trigger_tim:[3,9],precis:[3,9],precomput:3,predictor:6,preemphasi:1,preemphasis_coeffici:1,prefer:3,prenger:6,present:2,preserv:[3,7,9],pretrain:6,prevent:3,prior:[3,9],prng:10,probabl:[3,6],problem:3,process:[1,3,6,9,10],produc:1,product:[6,9],project:4,proper:0,properti:2,prototyp:4,provid:[0,7,8,9],pseudo:[3,7],publicli:2,publish:[2,6],puhrsch:6,put:1,pypi:4,pysoundfil:0,python:9,pytorch:4,q_factor:3,quadrat:3,qualiti:0,quantis:3,quantiz:3,quantization_channel:[3,9],quarter_sin:9,quiet:[3,9],quieter:[3,9],quitet:9,rand:7,rand_init:[3,9],randn:[3,7],random:[3,7],randomli:[3,9],randomperturbationfil:7,rang:[0,1,3,10],rate:[0,3,7,9],ratio:9,raw:[1,3,6,9],raw_energi:1,read:[0,5,7,10],readm:2,real:[3,9],reason:[3,9],receiv:6,recognit:[3,6],recommend:1,recomput:3,recompute_fram:3,record:[3,9],recov:[3,9],recoveri:[3,9],rectangular:[1,3],reduc:[1,3,9],reduct:[3,9],redund:[3,9],ref:3,refer:[0,2,4,8],reflect:[1,3,9],regen:3,reject:3,rel:[1,3],releas:[2,4],release1:2,release2:2,release3:2,relev:3,reload:6,remain:0,remix:7,remov:3,remove_dc_offset:1,replic:[3,9],report:0,repres:[1,3,9],represent:6,requir:[0,6,7],resampl:[1,4,7],resample_frequ:3,resampling_method:[1,3,9],resblock:6,reson:0,resourc:[2,4],respect:2,rest:2,restrict:0,result:[0,2,3,6,7,9],retain:[3,9],return_complex:[3,9],reus:3,revers:[3,9],revis:3,rewrit:9,riaa:3,rian:[3,9],ric:[3,9],riedhamm:3,right:[1,3],riol:[3,9],rms:2,rnn:6,roceed:[3,9],roll:[1,3,9],rolloff:[1,3,9],ronan:6,root:2,round:1,round_to_power_of_two:1,row:3,rpdf:3,run:[3,4,7,9],runtim:7,rxr:2,ryan:6,safe:7,same:[3,7,9],sampl:[0,1,2,3,6,7,9],sample_frequ:1,sample_r:[0,2,3,6,7,
8,9],sander:6,sanjeev:[3,6],satheesh:6,save:[3,6,7,8],scale:[1,3,6,9],scott:3,scp:5,script:[0,7],search:[3,9],search_tim:[3,9],seb:6,second:[1,3,7,9],section:2,see:[0,2,3,4,9],seed:10,segment:1,select:2,self:[6,7,9],sengupta:6,sentenc:2,separ:[3,6],sequenc:6,sequenti:9,session:6,set:[0,1,3,6,8,9,10],set_audio_backend:[0,8],set_buffer_s:10,set_se:10,set_use_thread:10,set_verbos:10,sgd:9,sgdarg:9,shape:[0,1,3,6,7,9],sharp:[1,3,9],sharper:[1,3,9],shelv:3,shift:[1,3,7],shorter:[3,9],should:[1,3,4,9],shubho:6,shuffl:2,shutdown:4,shutdown_sox_effect:7,side:[3,9],sign:0,signal:[0,1,3,9],signific:3,silenc:[3,7,9],similar:[1,2,3,7,9],simonyan:6,simpl:[3,9],simulate_first_pass_onlin:3,sinc:0,sinc_interpol:[1,3,9],sing:3,singl:[0,3,7],sinusoid:3,size:[0,1,3,6,7,9,10],skip:[0,2],skirt:3,slanei:[3,9],slide:[3,9],slidingwindowcmn:4,slight:7,slow:9,slower:9,slp:2,slt:2,small:[3,9],smallest:3,smooth:3,snip:3,snip_edg:[1,3],snippet:[3,9],soft:3,soft_min_f0:3,solv:9,some:[1,2,3,9],sometim:4,sound:[2,3,9],soundfil:[4,8],soundfile_backend:0,sourc:[0,1,2,3,4,5,6,7,8,9,10],sourceforg:[0,3,9,10],sox:[3,4,7,9,10],sox_effect:4,sox_io:[0,8],sox_io_backend:[0,3],sox_util:4,soxeffecttensortransform:7,soxeffecttransform:7,soxformat:0,speaker:2,speaker_id:2,spec_f:9,specgram:[3,6,9],specgram_mel_db:9,specifi:[0,1],spectral:[3,9],spectral_centroid:9,spectralcentroid:4,spectrogram:[4,6],spectrum:[3,9],speech:[0,2,3,6,9],speech_commands_v0:2,speechcommand:4,speed:[3,7,9],sph:[0,2],sphere:0,split:[3,6,9],squar:9,src:[0,8],stabl:[4,9],stack:6,stage:4,stanford:3,start:[0,3,6,9],state_dict:6,statu:4,std:1,step:3,stft:[3,9],still:[3,9],stimberg:6,stop:9,store:3,str:[0,1,2,3,5,6,7,8,9,10],stream:5,stretch:[3,9],stride:6,string:[0,3,5,7,9],structur:4,stype:9,subclass:2,subpackag:6,subset:2,subtract:1,subtract_mean:1,suffici:1,suggest:1,sum_:3,supervis:6,support:[0,2,3,4,10],suppos:7,surpass:6,sush:0,synnaev:6,synthesi:6,system:[6,8],tag:4,take:[0,3],talk_id:2,tar:2,target:2,target_transform:2,task:6,taslp:6,tasnet:6,tedlium:4,tensor:[0,1,2,3,5,6,8,9],term:4,test:[0,2,4,6,9],testing_list:2,text:[2,3,6,9],textbook:9,than:[0,3,7,9],thei:[2,3,9],theory_ideal_bandlimited_interpol:3,therefor:7,thi:[0,1,2,3,4,5,6,7,9,10],thing:[3,9],though:[0,7],through:[4,6],time:[0,1,3,4,6,7,9],time_mask_param:9,timemask:4,timestretch:4,timsainb:9,togeth:9,tolerance_chang:9,tolerance_loss:9,tone:3,top:2,top_db:[3,9],torch:[0,1,2,3,5,6,7,8,9],torchelast:4,torchscript:[0,7],torchserv:4,torchtext:4,torchvis:4,total:1,tpdf:3,track:3,train:2,tran:7,transact:6,transcript:2,transform:[2,3,4,6,7],transpos:6,treat:[3,9],trebl:3,triangular:[1,3,9],trigger:[3,9],trigger_level:[3,9],trigger_tim:[3,9],trim:[3,7,9],trmal:3,truncat:3,tsv:2,tune:[3,6],tupl:[0,2,3,5,6,7],turn:[1,3,9],twice:[3,9],two:[1,2,3,9],txt:2,type:[0,1,2,3,5,6,7,8,9,10],typic:[3,4],udio:[3,9],uff:[3,9],uint8:0,ulaw:0,unassign:8,underli:[0,10],uniform:3,uniformli:[3,9],union:2,unit:6,unknown:0,unsign:0,untouch:7,up_vot:2,upsampl:[3,6],upsample_filter_width:3,upsample_scal:6,url:[2,6],usag:[7,10],use:[0,1,2,3,5,6,7,8,9,10],use_energi:1,use_log_fbank:1,use_pow:1,use_thread:10,used:[0,2,3,6,9,10],useful:[1,3],user:[4,9],uses:[3,7,9],usic:[3,9],using:[2,3,6,9],usual:[3,9],util:[2,4,7],utter:[2,3,9],utterance_id:2,utterance_numb:2,uvic:2,v_0:3,vad:4,valid:[0,2,6,7,10],validation_list:2,valu:[0,1,2,3,7,9,10],van:6,variabl:[0,3],varianc:[3,9],variou:1,vbr:0,vctk:4,vctk_092:4,vector:[4,6],verbos:10,veri:7,version:[2,3],via:7,view_as_r:[3,9],vinyl:3,visit:2,voic:[3,9],vol:4,voltag:9,volum:[3,9],vorbi:0,vt
ln:1,vtln_high:1,vtln_low:1,vtln_map:1,vtln_warp:1,w2v_encod:6,wai:[1,3,7],want:[3,9],warn:[1,10],warp:1,waspaa:[3,9],wav2lett:4,wav2vec2:4,wav2vec2_asr:6,wav2vec2forctc:6,wav2vec:6,wav2vec_smal:6,wav2vec_small_960h:6,wav2vecencod:6,wav:[0,2,3,6,7,9],wave:7,wave_form:7,waveform:[1,2,3,6,7,9],wavernn:4,waves_yesno:2,weight:[3,9],well:0,what:[1,3],when:[0,3,6,7,9,10],where:[0,1,2,3,9],whether:[2,3,9],which:[0,2,3,5,7,9],whole:[0,2,3],wideband:0,width:[3,9],wiki:3,wikipedia:[3,9],win_length:[3,9],window:[0,1,3,9],window_fn:9,window_typ:1,wise:3,within:0,without:[3,9],wkwarg:9,won:3,word:2,work:7,worker:2,workshop:[3,9],would:[1,3],wrapper:[1,5],write:10,www:[2,3],x_mu:[3,9],xla:4,yesno:4,yesno_data:2,yet:[4,7],you:[0,1,2,7,8,9],your:[0,9],ython:[3,9],zero:[1,3,9],zeros_lik:6,zeroth:1,zhou:6,zip:[2,7]},titles:["torchaudio.backend","torchaudio.compliance.kaldi","torchaudio.datasets","torchaudio.functional","torchaudio","torchaudio.kaldi_io","torchaudio.models","torchaudio.sox_effects","torchaudio","torchaudio.transforms","torchaudio.utils"],titleterms:{"function":[1,3,6,8],allpass_biquad:3,amplitude_to_db:3,amplitudetodb:9,angl:3,appli:7,apply_codec:3,audiometadata:0,avail:0,backend:[0,8],band_biquad:3,bandpass_biquad:3,bandreject_biquad:3,bass_biquad:3,biquad:3,cmuarctic:2,common:0,commonvoic:2,complex:3,complex_norm:3,complexnorm:9,complianc:1,compute_delta:3,compute_kaldi_pitch:3,computedelta:9,contrast:3,convtasnet:6,create_dct:3,create_fb_matrix:3,data:0,dataset:2,db_to_amplitud:3,dcshift:3,deemph_biquad:3,deepspeech:6,detect_pitch_frequ:3,dither:3,effect:7,equalizer_biquad:3,extract:3,factori:6,fade:9,fbank:1,featur:3,file:7,filter:3,flanger:3,frequencymask:9,gain:3,griffinlim:[3,9],gtzan:2,highpass_biquad:3,import_fairseq_model:6,import_huggingface_model:6,info:0,initi:7,inversemelscal:9,kaldi:1,kaldi_io:5,lfilter:3,librispeech:2,libritt:2,list:7,ljspeech:2,load:0,lowpass_biquad:3,magphas:3,mask_along_axi:3,mask_along_axis_iid:3,matric:5,melscal:9,melspectrogram:9,mfcc:[1,9],model:6,mu_law_decod:3,mu_law_encod:3,mulawdecod:9,mulawencod:9,overdr:3,overview:0,phase_vocod:3,phaser:3,read_mat_ark:5,read_mat_scp:5,read_vec_flt_ark:5,read_vec_flt_scp:5,read_vec_int_ark:5,refer:[3,6,9],resampl:[3,9],resample_waveform:1,resourc:7,riaa_biquad:3,save:0,shutdown:7,sliding_window_cmn:3,slidingwindowcmn:9,soundfil:0,sox:0,sox_effect:7,sox_util:10,spectral_centroid:3,spectralcentroid:9,spectrogram:[1,3,9],speechcommand:2,structur:0,support:7,tedlium:2,tensor:7,timemask:9,timestretch:9,torchaudio:[0,1,2,3,4,5,6,7,8,9,10],transform:9,treble_biquad:3,util:[3,6,8,10],vad:[3,9],vctk:2,vctk_092:2,vector:5,vol:9,wav2lett:6,wav2vec2:6,wav2vec2_bas:6,wav2vec2_larg:6,wav2vec2_large_lv60k:6,wav2vec2model:6,wavernn:6,yesno:2}}) \ No newline at end of file diff --git a/0.9.0/sox_effects.html b/0.9.0/sox_effects.html new file mode 100644 index 0000000000..59519027ba --- /dev/null +++ b/0.9.0/sox_effects.html @@ -0,0 +1,940 @@ + + + + + + + + + + + + + torchaudio.sox_effects — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.sox_effects

+
+

Resource initialization / shutdown

+
+
+torchaudio.sox_effects.init_sox_effects()[source]
+

Initialize resources required to use sox effects.

+
+

Note

+

You do not need to call this function manually. It is called automatically.

+
+

Once initialized, you do not need to call this function again across multiple uses of +sox effects, though it is safe to do so as long as shutdown_sox_effects() has not been called yet. +Once shutdown_sox_effects() is called, you can no longer use SoX effects, and initializing +again will result in an error.

+
+ +
+
+torchaudio.sox_effects.shutdown_sox_effects()[source]
+

Clean up resources required to use sox effects.

+
+

Note

+

You do not need to call this function manually. It is called automatically.

+
+

It is safe to call this function multiple times. +Once shutdown_sox_effects() is called, you can no longer use SoX effects, and +initializing again will result in an error.

+
+ +
+
+

Listing supported effects

+
+
+torchaudio.sox_effects.effect_names() → List[str][source]
+

Gets list of valid sox effect names

+
+
Returns
+

list of available effect names.

+
+
Return type
+

List[str]

+
+
+
+
Example
>>> torchaudio.sox_effects.effect_names()
+['allpass', 'band', 'bandpass', ... ]
+
+
+
+
+
+ +
+
+

Applying effects

+

Apply a SoX effects chain on a torch.Tensor, or on a file and load the result as a torch.Tensor.

+
+

Applying effects on Tensor

+
+
+torchaudio.sox_effects.apply_effects_tensor(tensor: torch.Tensor, sample_rate: int, effects: List[List[str]], channels_first: bool = True) → Tuple[torch.Tensor, int][source]
+

Apply sox effects to given Tensor

+
+

Note

+

This function only works on CPU Tensors. +This function works in a way very similar to the sox command, but there are slight +differences. For example, the sox command adds certain effects automatically (such as the +rate effect after speed, pitch and other effects), whereas this function +only applies the given effects. (Therefore, to actually apply the speed effect, you also +need to give the rate effect with the desired sampling rate.)

+
+
+
Parameters
+
    +
  • tensor (torch.Tensor) – Input 2D CPU Tensor.

  • +
  • sample_rate (int) – Sample rate

  • +
  • effects (List[List[str]]) – List of effects.

  • +
  • channels_first (bool) – Indicates if the input Tensor’s dimension is +[channels, time] or [time, channels]

  • +
+
+
Returns
+

Resulting Tensor and sample rate. +The resulting Tensor has the same dtype as the input Tensor, and +the same channels order. The shape of the Tensor can be different based on the +effects applied. Sample rate can also be different based on the effects applied.

+
+
Return type
+

Tuple[torch.Tensor, int]

+
+
+
+
Example - Basic usage
>>>
+>>> # Defines the effects to apply
+>>> effects = [
+...     ['gain', '-n'],  # normalises to 0dB
+...     ['pitch', '5'],  # 5 cent pitch shift
+...     ['rate', '8000'],  # resample to 8000 Hz
+... ]
+>>>
+>>> # Generate pseudo wave:
+>>> # normalized, channels first, 2ch, sampling rate 16000, 1 second
+>>> sample_rate = 16000
+>>> waveform = 2 * torch.rand([2, sample_rate * 1]) - 1
+>>> waveform.shape
+torch.Size([2, 16000])
+>>> waveform
+tensor([[ 0.3138,  0.7620, -0.9019,  ..., -0.7495, -0.4935,  0.5442],
+        [-0.0832,  0.0061,  0.8233,  ..., -0.5176, -0.9140, -0.2434]])
+>>>
+>>> # Apply effects
+>>> waveform, sample_rate = apply_effects_tensor(
+...     waveform, sample_rate, effects, channels_first=True)
+>>>
+>>> # Check the result
+>>> # The new waveform is sampling rate 8000, 1 second.
+>>> # normalization and channel order are preserved
+>>> waveform.shape
+torch.Size([2, 8000])
+>>> waveform
+tensor([[ 0.5054, -0.5518, -0.4800,  ..., -0.0076,  0.0096, -0.0110],
+        [ 0.1331,  0.0436, -0.3783,  ..., -0.0035,  0.0012,  0.0008]])
+>>> sample_rate
+8000
+
+
+
+
Example - Torchscript-able transform
>>>
+>>> # Use `apply_effects_tensor` in `torch.nn.Module` and dump it to file,
+>>> # then run sox effect via Torchscript runtime.
+>>>
+>>> class SoxEffectTransform(torch.nn.Module):
+...     effects: List[List[str]]
+...
+...     def __init__(self, effects: List[List[str]]):
+...         super().__init__()
+...         self.effects = effects
+...
+...     def forward(self, tensor: torch.Tensor, sample_rate: int):
+...         return sox_effects.apply_effects_tensor(
+...             tensor, sample_rate, self.effects)
+...
+...
+>>> # Create transform object
+>>> effects = [
+...     ["lowpass", "-1", "300"],  # apply single-pole lowpass filter
+...     ["rate", "8000"],  # change sample rate to 8000
+... ]
+>>> transform = SoxEffectTransform(effects)
+>>>
+>>> # Dump it to file and load
+>>> path = 'sox_effect.zip'
+>>> torch.jit.script(transform).save(path)
+>>> transform = torch.jit.load(path)
+>>>
+>>> # Run transform
+>>> waveform, input_sample_rate = torchaudio.load("input.wav")
+>>> waveform, sample_rate = transform(waveform, input_sample_rate)
+>>> assert sample_rate == 8000
+
+
+
+
+
+ +
+
+

Applying effects on file

+
+
+torchaudio.sox_effects.apply_effects_file(path: str, effects: List[List[str]], normalize: bool = True, channels_first: bool = True, format: Optional[str] = None) → Tuple[torch.Tensor, int][source]
+

Apply sox effects to the audio file and load the resulting data as Tensor

+
+

Note

+

This function works in a way very similar to the sox command, but there are slight +differences. For example, the sox command adds certain effects automatically (such as the +rate effect after speed, pitch, etc.), whereas this function only applies the given +effects. Therefore, to actually apply the speed effect, you also need to give the rate +effect with the desired sampling rate, because internally the speed effect only alters the sampling +rate and leaves the samples untouched.

+
+
+
Parameters
+
    +
  • path (path-like object or file-like object) –

    Source of audio data. When the function is not compiled by TorchScript +(e.g. torch.jit.script), the following types are accepted:

    +
    +
      +
    • path-like: file path

    • +
    • file-like: Object with read(size: int) -> bytes method, +which returns byte string of at most size length.

    • +
    +
    +

    When the function is compiled by TorchScript, only str type is allowed.

    +

    Note: This argument is intentionally annotated as str only for +TorchScript compiler compatibility.

    +

  • +
  • effects (List[List[str]]) – List of effects.

  • +
  • normalize (bool) – When True, this function always returns float32, and sample values are +normalized to [-1.0, 1.0]. +If the input file is an integer WAV, giving False will change the resulting Tensor type to +an integer type. This argument has no effect for formats other +than integer WAV.

  • +
  • channels_first (bool) – When True, the returned Tensor has dimension [channel, time]. +Otherwise, the returned Tensor’s dimension is [time, channel].

  • +
  • format (str, optional) – Override the format detection with the given format. +Providing the argument might help when libsox cannot infer the format +from the header or the extension.

  • +
+
+
Returns
+

Resulting Tensor and sample rate. +If normalize=True, the resulting Tensor is always of float32 type. +If normalize=False and the input audio file is an integer WAV file, then the +resulting Tensor has the corresponding integer type. (Note: the 24-bit integer type is not supported.) +If channels_first=True, the resulting Tensor has dimension [channel, time], +otherwise [time, channel].

+
+
Return type
+

Tuple[torch.Tensor, int]

+
+
+
+
Example - Basic usage
>>>
+>>> # Defines the effects to apply
+>>> effects = [
+...     ['gain', '-n'],  # normalises to 0dB
+...     ['pitch', '5'],  # 5 cent pitch shift
+...     ['rate', '8000'],  # resample to 8000 Hz
+... ]
+>>>
+>>> # Apply effects and load data with channels_first=True
+>>> waveform, sample_rate = apply_effects_file("data.wav", effects, channels_first=True)
+>>>
+>>> # Check the result
+>>> waveform.shape
+torch.Size([2, 8000])
+>>> waveform
+tensor([[ 5.1151e-03,  1.8073e-02,  2.2188e-02,  ...,  1.0431e-07,
+         -1.4761e-07,  1.8114e-07],
+        [-2.6924e-03,  2.1860e-03,  1.0650e-02,  ...,  6.4122e-07,
+         -5.6159e-07,  4.8103e-07]])
+>>> sample_rate
+8000
+
+
+
+
Example - Apply random speed perturbation to dataset
>>>
+>>> # Load data from file, apply random speed perturbation
+>>> class RandomPerturbationFile(torch.utils.data.Dataset):
+...     """Given flist, apply random speed perturbation
+...
+...     Suppose all the input files are at least one second long.
+...     """
+...     def __init__(self, flist: List[str], sample_rate: int):
+...         super().__init__()
+...         self.flist = flist
+...         self.sample_rate = sample_rate
+...
+...     def __getitem__(self, index):
+...         speed = 0.5 + 1.5 * random.random()
+...         effects = [
+...             ['gain', '-n', '-10'],  # apply 10 db attenuation
+...             ['remix', '-'],  # merge all the channels
+...             ['speed', f'{speed:.5f}'],  # duration is now 0.5 ~ 2.0 seconds.
+...             ['rate', f'{self.sample_rate}'],
+...             ['pad', '0', '1.5'],  # add 1.5 seconds silence at the end
+...             ['trim', '0', '2'],  # get the first 2 seconds
+...         ]
+...         waveform, _ = torchaudio.sox_effects.apply_effects_file(
+...             self.flist[index], effects)
+...         return waveform
+...
+...     def __len__(self):
+...         return len(self.flist)
+...
+>>> dataset = RandomPerturbationFile(file_list, sample_rate=8000)
+>>> loader = torch.utils.data.DataLoader(dataset, batch_size=32)
+>>> for batch in loader:
+>>>     pass
+
+
+
+
+
+ +
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/torchaudio.html b/0.9.0/torchaudio.html new file mode 100644 index 0000000000..f28b7a7c6b --- /dev/null +++ b/0.9.0/torchaudio.html @@ -0,0 +1,713 @@ + + + + + + + + + + + + + torchaudio — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio

+
+

I/O functionalities

+

Audio I/O functions are implemented in the torchaudio.backend module, but for ease of use, the following functions are made available on the torchaudio module. Multiple backends are available, and you can switch between them with set_audio_backend().

+

Refer to torchaudio.backend for details.

+
+
+torchaudio.info(filepath: str, ...)
+

Fetch metadata of an audio file. Refer to torchaudio.backend for details.

+
+ +
+
+torchaudio.load(filepath: str, ...)
+

Load an audio file into a torch.Tensor object. Refer to torchaudio.backend for details.

+
+ +
+
+torchaudio.save(filepath: str, src: torch.Tensor, sample_rate: int, ...)
+

Save a torch.Tensor object into an audio file. Refer to torchaudio.backend for details.

+
+ +
+
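As an illustrative sketch of how these three functions fit together (the file name "speech.wav" is a placeholder; any readable audio file works, and the shapes depend on that file):
>>> import torchaudio
>>> # Inspect the file without decoding it
>>> metadata = torchaudio.info("speech.wav")
>>> print(metadata.sample_rate, metadata.num_channels)
>>> # Decode into a Tensor; channels_first=True by default, so the shape is [channel, time]
>>> waveform, sample_rate = torchaudio.load("speech.wav")
>>> # Write the Tensor back out as a WAV file
>>> torchaudio.save("speech_copy.wav", waveform, sample_rate)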
+

Backend Utilities

+
+
+torchaudio.list_audio_backends() → List[str][source]
+

List available backends

+
+
Returns
+

The list of available backends.

+
+
Return type
+

List[str]

+
+
+
+ +
+
+torchaudio.get_audio_backend() → Optional[str][source]
+

Get the name of the current backend

+
+
Returns
+

The name of the current backend or None if no backend is assigned.

+
+
Return type
+

Optional[str]

+
+
+
+ +
+
+torchaudio.set_audio_backend(backend: Optional[str])[source]
+

Set the backend for I/O operation

+
+
Parameters
+

backend (Optional[str]) – Name of the backend. One of "sox_io" or "soundfile", depending on availability on the system. If None is provided, the current backend is unassigned.

+
+
+
+ +
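A rough sketch of how the backend utilities are typically used (which backends are listed depends on how torchaudio was installed):
>>> import torchaudio
>>> torchaudio.list_audio_backends()        # e.g. ['sox_io', 'soundfile']
>>> torchaudio.get_audio_backend()          # name of the active backend, or None
>>> torchaudio.set_audio_backend("sox_io")  # switch to the sox_io backend explicitly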
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/transforms.html b/0.9.0/transforms.html new file mode 100644 index 0000000000..eeb52b1c02 --- /dev/null +++ b/0.9.0/transforms.html @@ -0,0 +1,1552 @@ + + + + + + + + + + + + + torchaudio.transforms — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + +
+ +
+
+ +
+

torchaudio.transforms

+

Transforms are common audio transforms. They can be chained together using torch.nn.Sequential

+
+

Spectrogram

+
+
+class torchaudio.transforms.Spectrogram(n_fft: int = 400, win_length: Optional[int] = None, hop_length: Optional[int] = None, pad: int = 0, window_fn: Callable[[...], torch.Tensor] = <built-in method hann_window of type object>, power: Optional[float] = 2.0, normalized: bool = False, wkwargs: Optional[dict] = None, center: bool = True, pad_mode: str = 'reflect', onesided: bool = True, return_complex: bool = False)[source]
+

Create a spectrogram from an audio signal.

+
+
Parameters
+
    +
  • n_fft (int, optional) – Size of FFT, creates n_fft // 2 + 1 bins. (Default: 400)

  • +
  • win_length (int or None, optional) – Window size. (Default: n_fft)

  • +
  • hop_length (int or None, optional) – Length of hop between STFT windows. (Default: win_length // 2)

  • +
  • pad (int, optional) – Two sided padding of signal. (Default: 0)

  • +
  • window_fn (Callable[.., Tensor], optional) – A function to create a window tensor +that is applied/multiplied to each frame/window. (Default: torch.hann_window)

  • +
  • power (float or None, optional) – Exponent for the magnitude spectrogram, +(must be > 0) e.g., 1 for energy, 2 for power, etc. +If None, then the complex spectrum is returned instead. (Default: 2)

  • +
  • normalized (bool, optional) – Whether to normalize by magnitude after stft. (Default: False)

  • +
  • wkwargs (dict or None, optional) – Arguments for window function. (Default: None)

  • +
  • center (bool, optional) – whether to pad waveform on both sides so +that the \(t\)-th frame is centered at time \(t \times \text{hop\_length}\). +(Default: True)

  • +
  • pad_mode (string, optional) – controls the padding method used when +center is True. (Default: "reflect")

  • +
  • onesided (bool, optional) – controls whether to return half of results to +avoid redundancy (Default: True)

  • +
  • return_complex (bool, optional) – Indicates whether the resulting complex-valued Tensor should be represented with +native complex dtype, such as torch.cfloat and torch.cdouble, or real dtype +mimicking complex value with an extra dimension for real and imaginary parts. +This argument is only effective when power=None. +See also torch.view_as_real.

  • +
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

Dimension (…, freq, time), where freq is +n_fft // 2 + 1 where n_fft is the number of +Fourier bins, and time is the number of window hops (n_frame).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
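A minimal usage sketch (the file name 'test.wav' and the parameter values are illustrative, not defaults):
>>> waveform, sample_rate = torchaudio.load('test.wav')
>>> spectrogram = torchaudio.transforms.Spectrogram(n_fft=400, hop_length=200)
>>> spec = spectrogram(waveform)  # (channel, n_fft // 2 + 1, n_frame), power spectrogram by default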
+
+

GriffinLim

+
+
+class torchaudio.transforms.GriffinLim(n_fft: int = 400, n_iter: int = 32, win_length: Optional[int] = None, hop_length: Optional[int] = None, window_fn: Callable[[...], torch.Tensor] = <built-in method hann_window of type object>, power: float = 2.0, wkwargs: Optional[dict] = None, momentum: float = 0.99, length: Optional[int] = None, rand_init: bool = True)[source]
+

Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.

+

Implementation ported from +librosa [1], A fast Griffin-Lim algorithm [2] +and Signal estimation from modified short-time Fourier transform [3].

+
+
Parameters
+
    +
  • n_fft (int, optional) – Size of FFT, creates n_fft // 2 + 1 bins. (Default: 400)

  • +
  • n_iter (int, optional) – Number of iteration for phase recovery process. (Default: 32)

  • +
  • win_length (int or None, optional) – Window size. (Default: n_fft)

  • +
  • hop_length (int or None, optional) – Length of hop between STFT windows. (Default: win_length // 2)

  • +
  • window_fn (Callable[.., Tensor], optional) – A function to create a window tensor +that is applied/multiplied to each frame/window. (Default: torch.hann_window)

  • +
  • power (float, optional) – Exponent for the magnitude spectrogram, +(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: 2)

  • +
  • wkwargs (dict or None, optional) – Arguments for window function. (Default: None)

  • +
  • momentum (float, optional) – The momentum parameter for fast Griffin-Lim. +Setting this to 0 recovers the original Griffin-Lim method. +Values near 1 can lead to faster convergence, but above 1 may not converge. (Default: 0.99)

  • +
  • length (int, optional) – Array length of the expected output. (Default: None)

  • +
  • rand_init (bool, optional) – Initializes phase randomly if True and to zero otherwise. (Default: True)

  • +
+
+
+
+
+forward(specgram: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

specgram (Tensor) – A magnitude-only STFT spectrogram of dimension (…, freq, frames) +where freq is n_fft // 2 + 1.

+
+
Returns
+

waveform of (…, time), where time equals the length parameter if given.

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
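For instance, GriffinLim can be paired with Spectrogram to reconstruct an approximate waveform, provided n_fft and power match between the two transforms; this is only a sketch, and 'test.wav' is a placeholder file name:
>>> waveform, sample_rate = torchaudio.load('test.wav')
>>> n_fft = 400
>>> spec = torchaudio.transforms.Spectrogram(n_fft=n_fft, power=2)(waveform)
>>> reconstructed = torchaudio.transforms.GriffinLim(n_fft=n_fft, power=2)(spec)  # (channel, time)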
+
+

AmplitudeToDB

+
+
+class torchaudio.transforms.AmplitudeToDB(stype: str = 'power', top_db: Optional[float] = None)[source]
+

Turn a tensor from the power/amplitude scale to the decibel scale.

+

This output depends on the maximum value in the input tensor, and so may return different values for an audio clip split into snippets vs. a full clip.

+
+
Parameters
+
    +
  • stype (str, optional) – scale of input tensor (‘power’ or ‘magnitude’). The +power being the elementwise square of the magnitude. (Default: 'power')

  • +
  • top_db (float, optional) – minimum negative cut-off in decibels. A reasonable number +is 80. (Default: None)

  • +
+
+
+
+
+forward(x: torch.Tensor) → torch.Tensor[source]
+

Numerically stable implementation from Librosa.

+

https://librosa.org/doc/latest/generated/librosa.amplitude_to_db.html

+
+
Parameters
+

x (Tensor) – Input tensor before being converted to decibel scale.

+
+
Returns
+

Output tensor in decibel scale.

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
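A short sketch converting a power spectrogram to decibels (assuming waveform was obtained from torchaudio.load, and the spectrogram uses Spectrogram's default power=2):
>>> spec = torchaudio.transforms.Spectrogram()(waveform)  # power spectrogram
>>> spec_db = torchaudio.transforms.AmplitudeToDB(stype='power', top_db=80)(spec)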
+
+

MelScale

+
+
+class torchaudio.transforms.MelScale(n_mels: int = 128, sample_rate: int = 16000, f_min: float = 0.0, f_max: Optional[float] = None, n_stft: Optional[int] = None, norm: Optional[str] = None, mel_scale: str = 'htk')[source]
+

Turn a normal STFT into a mel frequency STFT, using a conversion +matrix. This uses triangular filter banks.

+

The user can control which device the filter bank (fb) resides on (e.g. fb.to(spec_f.device)).

+
+
Parameters
+
    +
  • n_mels (int, optional) – Number of mel filterbanks. (Default: 128)

  • +
  • sample_rate (int, optional) – Sample rate of audio signal. (Default: 16000)

  • +
  • f_min (float, optional) – Minimum frequency. (Default: 0.)

  • +
  • f_max (float or None, optional) – Maximum frequency. (Default: sample_rate // 2)

  • +
  • n_stft (int, optional) – Number of bins in STFT. Calculated from first input +if None is given. See n_fft in Spectrogram. (Default: None)

  • +
  • norm (Optional[str]) – If ‘slaney’, divide the triangular mel weights by the width of the mel band (area normalization). (Default: None)

  • +
  • mel_scale (str, optional) – Scale to use: htk or slaney. (Default: htk)

  • +
+
+
+
+
+forward(specgram: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

specgram (Tensor) – A spectrogram STFT of dimension (…, freq, time).

+
+
Returns
+

Mel frequency spectrogram of size (…, n_mels, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
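A sketch of the typical pairing with Spectrogram (assuming waveform and sample_rate come from torchaudio.load; n_stft should equal n_fft // 2 + 1 of the preceding Spectrogram, or be left as None to infer it from the first input):
>>> n_fft = 400
>>> spec = torchaudio.transforms.Spectrogram(n_fft=n_fft)(waveform)
>>> mel_scale = torchaudio.transforms.MelScale(n_mels=128, sample_rate=sample_rate, n_stft=n_fft // 2 + 1)
>>> mel_spec = mel_scale(spec)  # (channel, n_mels, time)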
+
+

InverseMelScale

+
+
+class torchaudio.transforms.InverseMelScale(n_stft: int, n_mels: int = 128, sample_rate: int = 16000, f_min: float = 0.0, f_max: Optional[float] = None, max_iter: int = 100000, tolerance_loss: float = 1e-05, tolerance_change: float = 1e-08, sgdargs: Optional[dict] = None, norm: Optional[str] = None, mel_scale: str = 'htk')[source]
+

Solve for a normal STFT from a mel frequency STFT, using a conversion +matrix. This uses triangular filter banks.

+

It minimizes the Euclidean norm between the input mel-spectrogram and the product of the estimated spectrogram and the filter banks, using SGD.

+
+
Parameters
+
    +
  • n_stft (int) – Number of bins in STFT. See n_fft in Spectrogram.

  • +
  • n_mels (int, optional) – Number of mel filterbanks. (Default: 128)

  • +
  • sample_rate (int, optional) – Sample rate of audio signal. (Default: 16000)

  • +
  • f_min (float, optional) – Minimum frequency. (Default: 0.)

  • +
  • f_max (float or None, optional) – Maximum frequency. (Default: sample_rate // 2)

  • +
  • max_iter (int, optional) – Maximum number of optimization iterations. (Default: 100000)

  • +
  • tolerance_loss (float, optional) – Value of loss to stop optimization at. (Default: 1e-5)

  • +
  • tolerance_change (float, optional) – Difference in losses to stop optimization at. (Default: 1e-8)

  • +
  • sgdargs (dict or None, optional) – Arguments for the SGD optimizer. (Default: None)

  • +
  • norm (Optional[str]) – If ‘slaney’, divide the triangular mel weights by the width of the mel band +(area normalization). (Default: None)

  • +
  • mel_scale (str, optional) – Scale to use: htk or slaney. (Default: htk)

  • +
+
+
+
+
+forward(melspec: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

melspec (Tensor) – A Mel frequency spectrogram of dimension (…, n_mels, time)

+
+
Returns
+

Linear scale spectrogram of size (…, freq, time)

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

MelSpectrogram

+
+
+class torchaudio.transforms.MelSpectrogram(sample_rate: int = 16000, n_fft: int = 400, win_length: Optional[int] = None, hop_length: Optional[int] = None, f_min: float = 0.0, f_max: Optional[float] = None, pad: int = 0, n_mels: int = 128, window_fn: Callable[[...], torch.Tensor] = <built-in method hann_window of type object>, power: float = 2.0, normalized: bool = False, wkwargs: Optional[dict] = None, center: bool = True, pad_mode: str = 'reflect', onesided: bool = True, norm: Optional[str] = None, mel_scale: str = 'htk')[source]
+

Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram +and MelScale.

+
+
Sources
+
+
+
+
Parameters
+
    +
  • sample_rate (int, optional) – Sample rate of audio signal. (Default: 16000)

  • +
  • n_fft (int, optional) – Size of FFT, creates n_fft // 2 + 1 bins. (Default: 400)

  • +
  • win_length (int or None, optional) – Window size. (Default: n_fft)

  • +
  • hop_length (int or None, optional) – Length of hop between STFT windows. (Default: win_length // 2)

  • +
  • f_min (float, optional) – Minimum frequency. (Default: 0.)

  • +
  • f_max (float or None, optional) – Maximum frequency. (Default: None)

  • +
  • pad (int, optional) – Two sided padding of signal. (Default: 0)

  • +
  • n_mels (int, optional) – Number of mel filterbanks. (Default: 128)

  • +
  • window_fn (Callable[.., Tensor], optional) – A function to create a window tensor +that is applied/multiplied to each frame/window. (Default: torch.hann_window)

  • +
  • power (float, optional) – Exponent for the magnitude spectrogram, +(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: 2)

  • +
  • normalized (bool, optional) – Whether to normalize by magnitude after stft. (Default: False)

  • +
  • wkwargs (Dict[.., ..] or None, optional) – Arguments for window function. (Default: None)

  • +
  • center (bool, optional) – whether to pad waveform on both sides so +that the \(t\)-th frame is centered at time \(t \times \text{hop\_length}\). +(Default: True)

  • +
  • pad_mode (string, optional) – controls the padding method used when +center is True. (Default: "reflect")

  • +
  • onesided (bool, optional) – controls whether to return half of results to +avoid redundancy. (Default: True)

  • +
  • norm (Optional[str]) – If ‘slaney’, divide the triangular mel weights by the width of the mel band +(area normalization). (Default: None)

  • +
  • mel_scale (str, optional) – Scale to use: htk or slaney. (Default: htk)

  • +
+
+
+
+
Example
>>> waveform, sample_rate = torchaudio.load('test.wav')
+>>> mel_specgram = transforms.MelSpectrogram(sample_rate)(waveform)  # (channel, n_mels, time)
+
+
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

Mel frequency spectrogram of size (…, n_mels, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

MFCC

+
+
+class torchaudio.transforms.MFCC(sample_rate: int = 16000, n_mfcc: int = 40, dct_type: int = 2, norm: str = 'ortho', log_mels: bool = False, melkwargs: Optional[dict] = None)[source]
+

Create the Mel-frequency cepstrum coefficients from an audio signal.

+

By default, this calculates the MFCC on the DB-scaled Mel spectrogram. +This is not the textbook implementation, but is implemented here to +give consistency with librosa.

+

This output depends on the maximum value in the input spectrogram, and so may return different values for an audio clip split into snippets vs. a full clip.

+
+
Parameters
+
    +
  • sample_rate (int, optional) – Sample rate of audio signal. (Default: 16000)

  • +
  • n_mfcc (int, optional) – Number of mfc coefficients to retain. (Default: 40)

  • +
  • dct_type (int, optional) – type of DCT (discrete cosine transform) to use. (Default: 2)

  • +
  • norm (str, optional) – norm to use. (Default: 'ortho')

  • +
  • log_mels (bool, optional) – whether to use log-mel spectrograms instead of db-scaled. (Default: False)

  • +
  • melkwargs (dict or None, optional) – arguments for MelSpectrogram. (Default: None)

  • +
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

specgram_mel_db of size (…, n_mfcc, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
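A usage sketch (the file name and the melkwargs values are illustrative choices, not defaults):
>>> waveform, sample_rate = torchaudio.load('test.wav')
>>> mfcc_transform = torchaudio.transforms.MFCC(
...     sample_rate=sample_rate, n_mfcc=40,
...     melkwargs={'n_fft': 400, 'hop_length': 160, 'n_mels': 64})
>>> mfcc = mfcc_transform(waveform)  # (channel, n_mfcc, time)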
+
+

MuLawEncoding

+
+
+class torchaudio.transforms.MuLawEncoding(quantization_channels: int = 256)[source]
+

Encode signal based on mu-law companding. For more info see the +Wikipedia Entry

+

This algorithm assumes the signal has been scaled to between -1 and 1 and +returns a signal encoded with values from 0 to quantization_channels - 1

+
+
Parameters
+

quantization_channels (int, optional) – Number of channels. (Default: 256)

+
+
+
+
+forward(x: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

x (Tensor) – A signal to be encoded.

+
+
Returns
+

An encoded signal.

+
+
Return type
+

x_mu (Tensor)

+
+
+
+ +
+ +
+
+

MuLawDecoding

+
+
+class torchaudio.transforms.MuLawDecoding(quantization_channels: int = 256)[source]
+

Decode mu-law encoded signal. For more info see the +Wikipedia Entry

+

This expects an input with values between 0 and quantization_channels - 1 +and returns a signal scaled between -1 and 1.

+
+
Parameters
+

quantization_channels (int, optional) – Number of channels. (Default: 256)

+
+
+
+
+forward(x_mu: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

x_mu (Tensor) – A mu-law encoded signal which needs to be decoded.

+
+
Returns
+

The signal decoded.

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
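A round-trip sketch with MuLawEncoding and MuLawDecoding (assuming waveform comes from torchaudio.load with normalize=True, so its values lie in [-1.0, 1.0]):
>>> encode = torchaudio.transforms.MuLawEncoding(quantization_channels=256)
>>> decode = torchaudio.transforms.MuLawDecoding(quantization_channels=256)
>>> x_mu = encode(waveform)  # integer values in [0, 255]
>>> x_hat = decode(x_mu)     # back in [-1.0, 1.0], up to quantization error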
+
+

Resample

+
+
+class torchaudio.transforms.Resample(orig_freq: float = 16000, new_freq: float = 16000, resampling_method: str = 'sinc_interpolation', lowpass_filter_width: int = 6, rolloff: float = 0.99, beta: Optional[float] = None, *, dtype: Optional[torch.dtype] = None)[source]
+

Resample a signal from one frequency to another. A resampling method can be given.

+
+

Note

+

If resampling on waveforms of higher precision than float32, there may be a small loss of precision +because the kernel is cached once as float32. If high precision resampling is important for your application, +the functional form will retain higher precision, but run slower because it does not cache the kernel. +Alternatively, you could rewrite a transform that caches a higher precision kernel.

+
+
+
Parameters
+
    +
  • orig_freq (float, optional) – The original frequency of the signal. (Default: 16000)

  • +
  • new_freq (float, optional) – The desired frequency. (Default: 16000)

  • +
  • resampling_method (str, optional) – The resampling method to use. +Options: [sinc_interpolation, kaiser_window] (Default: 'sinc_interpolation')

  • +
  • lowpass_filter_width (int, optional) – Controls the sharpness of the filter, more == sharper +but less efficient. (Default: 6)

  • +
  • rolloff (float, optional) – The roll-off frequency of the filter, as a fraction of the Nyquist. +Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: 0.99)

  • +
  • beta (float or None) – The shape parameter used for kaiser window.

  • +
  • dtype (torch.dtype, optional) – Determines the precision in which the resampling kernel is pre-computed and cached. If not provided, the kernel is computed with torch.float64 and then cached as torch.float32. If you need higher precision, provide torch.float64, and the pre-computed kernel is computed and cached as torch.float64. If you need to resample with lower precision, then instead of providing this argument, please use Resample.to(dtype), so that the kernel generation is still carried out on torch.float64.

  • +
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

Output signal of dimension (…, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
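A usage sketch (the original and target frequencies are illustrative; orig_freq should match the actual sample rate of the input):
>>> waveform, sample_rate = torchaudio.load('test.wav')  # e.g. 44100 Hz
>>> resample = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
>>> resampled = resample(waveform)  # (channel, time), now at 16000 Hz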
+
+

ComplexNorm

+
+
+class torchaudio.transforms.ComplexNorm(power: float = 1.0)[source]
+

Compute the norm of complex tensor input.

+
+
Parameters
+

power (float, optional) – Power of the norm. (Default: to 1.0)

+
+
+
+
+forward(complex_tensor: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

complex_tensor (Tensor) – Tensor shape of (…, complex=2).

+
+
Returns
+

norm of the input tensor, shape of (…, ).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

ComputeDeltas

+
+
+class torchaudio.transforms.ComputeDeltas(win_length: int = 5, mode: str = 'replicate')[source]
+

Compute delta coefficients of a tensor, usually a spectrogram.

+

See torchaudio.functional.compute_deltas for more details.

+
+
Parameters
+
    +
  • win_length (int) – The window length used for computing delta. (Default: 5)

  • +
  • mode (str) – Mode parameter passed to padding. (Default: 'replicate')

  • +
+
+
+
+
+forward(specgram: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

specgram (Tensor) – Tensor of audio of dimension (…, freq, time).

+
+
Returns
+

Tensor of deltas of dimension (…, freq, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

TimeStretch

+
+
+class torchaudio.transforms.TimeStretch(hop_length: Optional[int] = None, n_freq: int = 201, fixed_rate: Optional[float] = None)[source]
+

Stretch stft in time without modifying pitch for a given rate.

+
+
Parameters
+
    +
  • hop_length (int or None, optional) – Length of hop between STFT windows. (Default: win_length // 2)

  • +
  • n_freq (int, optional) – number of filter banks from stft. (Default: 201)

  • +
  • fixed_rate (float or None, optional) – rate to speed up or slow down by. +If None is provided, rate must be passed to the forward method. (Default: None)

  • +
+
+
+
+
+forward(complex_specgrams: torch.Tensor, overriding_rate: Optional[float] = None) → torch.Tensor[source]
+
+
Parameters
+
    +
  • complex_specgrams (Tensor) – Either a real tensor of dimension of (..., freq, num_frame, complex=2) +or a tensor of dimension (..., freq, num_frame) with complex dtype.

  • +
  • overriding_rate (float or None, optional) – speed up to apply to this batch. +If no rate is passed, use self.fixed_rate. (Default: None)

  • +
+
+
Returns
+

Stretched spectrogram. The resulting tensor is of the same dtype as the input +spectrogram, but the number of frames is changed to ceil(num_frame / rate).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
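A sketch of stretching a complex spectrogram by a fixed factor (parameter values are illustrative, and waveform is assumed to come from torchaudio.load; the Spectrogram must be computed with power=None so the phase is available):
>>> n_fft = 400
>>> spec = torchaudio.transforms.Spectrogram(n_fft=n_fft, power=None, return_complex=True)(waveform)
>>> stretch = torchaudio.transforms.TimeStretch(hop_length=n_fft // 2, n_freq=n_fft // 2 + 1)
>>> stretched = stretch(spec, 1.2)  # ~20% faster; frames become ceil(n_frame / 1.2)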
+
+

Fade

+
+
+class torchaudio.transforms.Fade(fade_in_len: int = 0, fade_out_len: int = 0, fade_shape: str = 'linear')[source]
+

Add a fade in and/or fade out to a waveform.

+
+
Parameters
+
    +
  • fade_in_len (int, optional) – Length of fade-in (time frames). (Default: 0)

  • +
  • fade_out_len (int, optional) – Length of fade-out (time frames). (Default: 0)

  • +
  • fade_shape (str, optional) – Shape of fade. Must be one of: “quarter_sine”, +“half_sine”, “linear”, “logarithmic”, “exponential”. (Default: "linear")

  • +
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

Tensor of audio of dimension (…, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

FrequencyMasking

+
+
+class torchaudio.transforms.FrequencyMasking(freq_mask_param: int, iid_masks: bool = False)[source]
+

Apply masking to a spectrogram in the frequency domain.

+
+
Parameters
+
    +
  • freq_mask_param (int) – maximum possible length of the mask. +Indices uniformly sampled from [0, freq_mask_param).

  • +
  • iid_masks (bool, optional) – whether to apply different masks to each +example/channel in the batch. (Default: False) +This option is applicable only when the input tensor is 4D.

  • +
+
+
+
+
+forward(specgram: torch.Tensor, mask_value: float = 0.0) → torch.Tensor
+
+
Parameters
+
    +
  • specgram (Tensor) – Tensor of dimension (…, freq, time).

  • +
  • mask_value (float) – Value to assign to the masked columns.

  • +
+
+
Returns
+

Masked spectrogram of dimensions (…, freq, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

TimeMasking

+
+
+class torchaudio.transforms.TimeMasking(time_mask_param: int, iid_masks: bool = False)[source]
+

Apply masking to a spectrogram in the time domain.

+
+
Parameters
+
    +
  • time_mask_param (int) – maximum possible length of the mask. +Indices uniformly sampled from [0, time_mask_param).

  • +
  • iid_masks (bool, optional) – whether to apply different masks to each +example/channel in the batch. (Default: False) +This option is applicable only when the input tensor is 4D.

  • +
+
+
+
+
+forward(specgram: torch.Tensor, mask_value: float = 0.0) → torch.Tensor
+
+
Parameters
+
    +
  • specgram (Tensor) – Tensor of dimension (…, freq, time).

  • +
  • mask_value (float) – Value to assign to the masked columns.

  • +
+
+
Returns
+

Masked spectrogram of dimensions (…, freq, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
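FrequencyMasking and TimeMasking are often chained for SpecAugment-style augmentation; a sketch with illustrative mask sizes, assuming waveform and sample_rate come from torchaudio.load:
>>> mel_spec = torchaudio.transforms.MelSpectrogram(sample_rate)(waveform)  # (channel, n_mels, time)
>>> freq_mask = torchaudio.transforms.FrequencyMasking(freq_mask_param=15)
>>> time_mask = torchaudio.transforms.TimeMasking(time_mask_param=35)
>>> augmented = time_mask(freq_mask(mel_spec))  # masked regions filled with 0.0 by default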
+
+

Vol

+
+
+class torchaudio.transforms.Vol(gain: float, gain_type: str = 'amplitude')[source]
+

Adjust the volume of a waveform.

+
+
Parameters
+
    +
  • gain (float) – Interpreted according to the given gain_type: +If gain_type = amplitude, gain is a positive amplitude ratio. +If gain_type = power, gain is a power (voltage squared). +If gain_type = db, gain is in decibels.

  • +
  • gain_type (str, optional) – Type of gain. One of: amplitude, power, db (Default: amplitude)

  • +
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

Tensor of audio of dimension (…, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

SlidingWindowCmn

+
+
+class torchaudio.transforms.SlidingWindowCmn(cmn_window: int = 600, min_cmn_window: int = 100, center: bool = False, norm_vars: bool = False)[source]
+

Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.

+
+
Parameters
+
    +
  • cmn_window (int, optional) – Window in frames for running average CMN computation (int, default = 600)

  • +
  • min_cmn_window (int, optional) – Minimum CMN window used at start of decoding (adds latency only at start). +Only applicable if center == false, ignored if center==true (int, default = 100)

  • +
  • center (bool, optional) – If true, use a window centered on the current frame +(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)

  • +
  • norm_vars (bool, optional) – If true, normalize variance to one. (bool, default = false)

  • +
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

Tensor of audio of dimension (…, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

SpectralCentroid

+
+
+class torchaudio.transforms.SpectralCentroid(sample_rate: int, n_fft: int = 400, win_length: Optional[int] = None, hop_length: Optional[int] = None, pad: int = 0, window_fn: Callable[[...], torch.Tensor] = <built-in method hann_window of type object>, wkwargs: Optional[dict] = None)[source]
+

Compute the spectral centroid for each channel along the time axis.

+

The spectral centroid is defined as the weighted average of the +frequency values, weighted by their magnitude.

+
+
Parameters
+
    +
  • sample_rate (int) – Sample rate of audio signal.

  • +
  • n_fft (int, optional) – Size of FFT, creates n_fft // 2 + 1 bins. (Default: 400)

  • +
  • win_length (int or None, optional) – Window size. (Default: n_fft)

  • +
  • hop_length (int or None, optional) – Length of hop between STFT windows. (Default: win_length // 2)

  • +
  • pad (int, optional) – Two sided padding of signal. (Default: 0)

  • +
  • window_fn (Callable[.., Tensor], optional) – A function to create a window tensor +that is applied/multiplied to each frame/window. (Default: torch.hann_window)

  • +
  • wkwargs (dict or None, optional) – Arguments for window function. (Default: None)

  • +
+
+
+
+
Example
>>> waveform, sample_rate = torchaudio.load('test.wav')
+>>> spectral_centroid = transforms.SpectralCentroid(sample_rate)(waveform)  # (channel, time)
+
+
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (…, time).

+
+
Returns
+

Spectral Centroid of size (…, time).

+
+
Return type
+

Tensor

+
+
+
+ +
+ +
+
+

Vad

+
+
+class torchaudio.transforms.Vad(sample_rate: int, trigger_level: float = 7.0, trigger_time: float = 0.25, search_time: float = 1.0, allowed_gap: float = 0.25, pre_trigger_time: float = 0.0, boot_time: float = 0.35, noise_up_time: float = 0.1, noise_down_time: float = 0.01, noise_reduction_amount: float = 1.35, measure_freq: float = 20.0, measure_duration: Optional[float] = None, measure_smooth_time: float = 0.4, hp_filter_freq: float = 50.0, lp_filter_freq: float = 6000.0, hp_lifter_freq: float = 150.0, lp_lifter_freq: float = 2000.0)[source]
+

Voice Activity Detector. Similar to SoX implementation. +Attempts to trim silence and quiet background sounds from the ends of recordings of speech. +The algorithm currently uses a simple cepstral power measurement to detect voice, +so may be fooled by other things, especially music.

+

The effect can trim only from the front of the audio, +so in order to trim from the back, the reverse effect must also be used.

+
+
Parameters
+
    +
  • sample_rate (int) – Sample rate of audio signal.

  • +
  • trigger_level (float, optional) – The measurement level used to trigger activity detection. This may need to be changed depending on the noise level, signal level, and other characteristics of the input audio. (Default: 7.0)

  • +
  • trigger_time (float, optional) – The time constant (in seconds) +used to help ignore short bursts of sound. (Default: 0.25)

  • +
  • search_time (float, optional) – The amount of audio (in seconds) +to search for quieter/shorter bursts of audio to include prior +to the detected trigger point. (Default: 1.0)

  • +
  • allowed_gap (float, optional) – The allowed gap (in seconds) between quieter/shorter bursts of audio to include prior to the detected trigger point. (Default: 0.25)

  • +
  • pre_trigger_time (float, optional) – The amount of audio (in seconds) to preserve +before the trigger point and any found quieter/shorter bursts. (Default: 0.0)

  • +
  • boot_time (float, optional) – The algorithm (internally) uses adaptive noise estimation/reduction in order to detect the start of the wanted audio. This option sets the time for the initial noise estimate. (Default: 0.35)

  • +
  • noise_up_time (float, optional) – Time constant used by the adaptive noise estimator for when the noise level is increasing. (Default: 0.1)

  • +
  • noise_down_time (float, optional) – Time constant used by the adaptive noise estimator for when the noise level is decreasing. (Default: 0.01)

  • +
  • noise_reduction_amount (float, optional) – The amount of noise reduction to use in the detection algorithm (e.g. 0, 0.5, …). (Default: 1.35)

  • +
  • measure_freq (float, optional) – Frequency of the algorithm’s processing/measurements. (Default: 20.0)

  • +
  • measure_duration (float, optional) – Measurement duration. (Default: Twice the measurement period; i.e. with overlap.)

  • +
  • measure_smooth_time (float, optional) – Time constant used to smooth spectral measurements. (Default: 0.4)

  • +
  • hp_filter_freq (float, optional) – "Brick-wall" frequency of high-pass filter applied at the input to the detector algorithm. (Default: 50.0)

  • +
  • lp_filter_freq (float, optional) – "Brick-wall" frequency of low-pass filter applied at the input to the detector algorithm. (Default: 6000.0)

  • +
  • hp_lifter_freq (float, optional) – "Brick-wall" frequency of high-pass lifter used in the detector algorithm. (Default: 150.0)

  • +
  • lp_lifter_freq (float, optional) – "Brick-wall" frequency of low-pass lifter used in the detector algorithm. (Default: 2000.0)

  • +
+
+
+
+
Reference:
+
+
+
+
+forward(waveform: torch.Tensor) → torch.Tensor[source]
+
+
Parameters
+

waveform (Tensor) – Tensor of audio of dimension (channels, time) or (time). A Tensor of shape (channels, time) is treated as a multi-channel recording of the same event, and the resulting output will be trimmed to the earliest voice activity in any channel.

+
+
+
+ +
+ +
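A sketch of trimming silence from both ends ('speech.wav' is a placeholder file name; trailing silence is handled by reversing, as noted above):
>>> import torch
>>> waveform, sample_rate = torchaudio.load('speech.wav')
>>> vad = torchaudio.transforms.Vad(sample_rate=sample_rate)
>>> front_trimmed = vad(waveform)  # trims leading silence only
>>> both_trimmed = torch.flip(vad(torch.flip(front_trimmed, [-1])), [-1])  # also trims trailing silence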
+
+

References

+

+
1
+

Brian McFee, Colin Raffel, Dawen Liang, Daniel P.W. Ellis, Matt McVicar, Eric Battenberg, and Oriol Nieto. Librosa: Audio and Music Signal Analysis in Python. In Kathryn Huff and James Bergstra, editors, Proceedings of the 14th Python in Science Conference, 18 – 24. 2015. doi:10.25080/Majora-7b98e3ed-003.

+
+
2
+

Nathanaël Perraudin, Peter Balazs, and Peter L. Søndergaard. A Fast Griffin-Lim Algorithm. In 2013 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, 1–4. 2013. doi:10.1109/WASPAA.2013.6701851.

+
+
3
+

D. Griffin and Jae Lim. Signal Estimation from Modified Short-Time Fourier Transform. In ICASSP ‘83. IEEE International Conference on Acoustics, Speech, and Signal Processing, volume 8, 804–807. 1983. doi:10.1109/ICASSP.1983.1172092.

+
+
+

+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + \ No newline at end of file diff --git a/0.9.0/utils.html b/0.9.0/utils.html new file mode 100644 index 0000000000..c0c8e51a5a --- /dev/null +++ b/0.9.0/utils.html @@ -0,0 +1,757 @@ + + + + + + + + + + + + + torchaudio.utils — Torchaudio 0.9.0a0+33b2469 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + +
+ +
+
+ +
+

torchaudio.utils

+
+

torchaudio.utils.sox_utils

+

Utility module to configure libsox. +This affects functionalities in Sox IO backend and Sox Effects.

+
+
+torchaudio.utils.sox_utils.list_effects() → Dict[str, str][source]
+

List the available sox effect names

+
+
Returns
+

Mapping from effect name to usage

+
+
Return type
+

Dict[str, str]

+
+
+
+ +
+
+torchaudio.utils.sox_utils.list_read_formats() → List[str][source]
+

List the supported audio formats for read

+
+
Returns
+

List of supported audio formats

+
+
Return type
+

List[str]

+
+
+
+ +
+
+torchaudio.utils.sox_utils.list_write_formats() → List[str][source]
+

List the supported audio formats for write

+
+
Returns
+

List of supported audio formats

+
+
Return type
+

List[str]

+
+
+
+ +
+
+torchaudio.utils.sox_utils.set_buffer_size(buffer_size: int)[source]
+

Set buffer size for sox effect chain

+
+
Parameters
+

buffer_size (int) – Set the size in bytes of the buffers used for processing audio.

+
+
+ +
+ +
+
+torchaudio.utils.sox_utils.set_seed(seed: int)[source]
+

Set libsox’s PRNG

+
+
Parameters
+

seed (int) – Seed value. Valid range is int32.

+
+
+ +
+ +
+
+torchaudio.utils.sox_utils.set_use_threads(use_threads: bool)[source]
+

Set multithread option for sox effect chain

+
+
Parameters
+

use_threads (bool) – When True, enables libsox’s parallel effects channels processing. To use multithreading, the underlying libsox has to be compiled with OpenMP support.

+
+
+ +
+ +
+
+torchaudio.utils.sox_utils.set_verbosity(verbosity: int)[source]
+

Set libsox’s verbosity

+
+
Parameters
+

verbosity (int) –

Set verbosity level of libsox.

+
    +
  • 1 failure messages

  • +
  • 2 warnings

  • +
  • 3 details of processing

  • +
  • 4-6 increasing levels of debug messages

  • +
+

+
+
+ +
+ +
+
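A configuration sketch (this requires a torchaudio build with sox support; the values shown are illustrative):
>>> from torchaudio.utils import sox_utils
>>> sox_utils.set_verbosity(1)           # report failure messages only
>>> sox_utils.set_buffer_size(16384)     # buffer size in bytes for the effect chain
>>> effects = sox_utils.list_effects()   # dict mapping effect name to usage string
>>> formats = sox_utils.list_read_formats()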
+ + +
+ +
+
+ + + + + + +
+ + + + + + + + \ No newline at end of file diff --git a/stable b/stable index 8adc70fdd9..899f24fc75 120000 --- a/stable +++ b/stable @@ -1 +1 @@ -0.8.0 \ No newline at end of file +0.9.0 \ No newline at end of file diff --git a/versions.html b/versions.html index 08b28bad8f..2c45d60c2f 100644 --- a/versions.html +++ b/versions.html @@ -21,7 +21,10 @@

PyTorch Documentation

master (unstable)
  • - v0.8.0 (stable release) + v0.9.0 (stable release) +
  • +
  • + v0.8.0
  • v0.7.0