
Commit b29a463

jamarshon authored and cpuhrsch committed
[BC] Standardization of Transforms/Functionals (#152)
1 parent 2271a7a commit b29a463

File tree

5 files changed: +365 -839 lines changed


test/test_functional.py

Lines changed: 2 additions & 52 deletions
@@ -2,6 +2,8 @@
 
 import torch
 import torchaudio
+import torchaudio.functional as F
+import pytest
 import unittest
 import test.common_utils
 
@@ -11,10 +13,6 @@
 import numpy as np
 import librosa
 
-import pytest
-import torchaudio.functional as F
-xfail = pytest.mark.xfail
-
 
 class TestFunctional(unittest.TestCase):
     data_sizes = [(2, 20), (3, 15), (4, 10)]
@@ -197,54 +195,6 @@ def _num_stft_bins(signal_len, fft_len, hop_length, pad):
     return (signal_len + 2 * pad - fft_len + hop_length) // hop_length
 
 
-@pytest.mark.parametrize('fft_length', [512])
-@pytest.mark.parametrize('hop_length', [256])
-@pytest.mark.parametrize('waveform', [
-    (torch.randn(1, 100000)),
-    (torch.randn(1, 2, 100000)),
-    pytest.param(torch.randn(1, 100), marks=xfail(raises=RuntimeError)),
-])
-@pytest.mark.parametrize('pad_mode', [
-    # 'constant',
-    'reflect',
-])
-@unittest.skipIf(not IMPORT_LIBROSA, 'Librosa is not available')
-def test_stft(waveform, fft_length, hop_length, pad_mode):
-    """
-    Test STFT for multi-channel signals.
-
-    Padding: Value in having padding outside of torch.stft?
-    """
-    pad = fft_length // 2
-    window = torch.hann_window(fft_length)
-    complex_spec = F.stft(waveform,
-                          fft_length=fft_length,
-                          hop_length=hop_length,
-                          window=window,
-                          pad_mode=pad_mode)
-    mag_spec, phase_spec = F.magphase(complex_spec)
-
-    # == Test shape
-    expected_size = list(waveform.size()[:-1])
-    expected_size += [fft_length // 2 + 1, _num_stft_bins(
-        waveform.size(-1), fft_length, hop_length, pad), 2]
-    assert complex_spec.dim() == waveform.dim() + 2
-    assert complex_spec.size() == torch.Size(expected_size)
-
-    # == Test values
-    fft_config = dict(n_fft=fft_length, hop_length=hop_length, pad_mode=pad_mode)
-    # note that librosa *automatically* pad with fft_length // 2.
-    expected_complex_spec = np.apply_along_axis(librosa.stft, -1,
-                                                waveform.numpy(), **fft_config)
-    expected_mag_spec, _ = librosa.magphase(expected_complex_spec)
-    # Convert torch to np.complex
-    complex_spec = complex_spec.numpy()
-    complex_spec = complex_spec[..., 0] + 1j * complex_spec[..., 1]
-
-    assert np.allclose(complex_spec, expected_complex_spec, atol=1e-5)
-    assert np.allclose(mag_spec.numpy(), expected_mag_spec, atol=1e-5)
-
-
 @pytest.mark.parametrize('rate', [0.5, 1.01, 1.3])
 @pytest.mark.parametrize('complex_specgrams', [
     torch.randn(1, 2, 1025, 400, 2),

test/test_jit.py

Lines changed: 12 additions & 92 deletions
@@ -30,40 +30,18 @@ def _test_script_module(self, tensor, f, *args):
 
         self.assertTrue(torch.allclose(jit_out, py_out))
 
-    def test_torchscript_scale(self):
-        @torch.jit.script
-        def jit_method(tensor, factor):
-            # type: (Tensor, int) -> Tensor
-            return F.scale(tensor, factor)
-
-        tensor = torch.rand((10, 1))
-        factor = 2
-
-        jit_out = jit_method(tensor, factor)
-        py_out = F.scale(tensor, factor)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_scale(self):
-        tensor = torch.rand((10, 1), device="cuda")
-
-        self._test_script_module(tensor, transforms.Scale)
-
     def test_torchscript_pad_trim(self):
         @torch.jit.script
-        def jit_method(tensor, ch_dim, max_len, len_dim, fill_value):
-            # type: (Tensor, int, int, int, float) -> Tensor
-            return F.pad_trim(tensor, ch_dim, max_len, len_dim, fill_value)
+        def jit_method(tensor, max_len, fill_value):
+            # type: (Tensor, int, float) -> Tensor
+            return F.pad_trim(tensor, max_len, fill_value)
 
-        tensor = torch.rand((10, 1))
-        ch_dim = 1
+        tensor = torch.rand((1, 10))
         max_len = 5
-        len_dim = 0
         fill_value = 3.
 
-        jit_out = jit_method(tensor, ch_dim, max_len, len_dim, fill_value)
-        py_out = F.pad_trim(tensor, ch_dim, max_len, len_dim, fill_value)
+        jit_out = jit_method(tensor, max_len, fill_value)
+        py_out = F.pad_trim(tensor, max_len, fill_value)
 
         self.assertTrue(torch.allclose(jit_out, py_out))
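Note: the hunk above standardizes F.pad_trim to the three-argument form (tensor, max_len, fill_value) and feeds it a (channel, time) shaped tensor. A minimal usage sketch follows, assuming pad_trim pads or trims along the last (time) dimension; the expected shapes in the comments are inferences from the test, not output copied from a run.

import torch
import torchaudio.functional as F

waveform = torch.rand((1, 10))          # (channel, time), as in the updated test
padded = F.pad_trim(waveform, 15, 3.)   # assumed: pad the time axis out to 15 samples with fill_value 3.
trimmed = F.pad_trim(waveform, 5, 3.)   # assumed: trim the time axis down to 5 samples
# Expected shapes (assumption): padded -> (1, 15), trimmed -> (1, 5)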

@@ -74,45 +52,6 @@ def test_scriptmodule_pad_trim(self):
 
         self._test_script_module(tensor, transforms.PadTrim, max_len)
 
-    def test_torchscript_downmix_mono(self):
-        @torch.jit.script
-        def jit_method(tensor, ch_dim):
-            # type: (Tensor, int) -> Tensor
-            return F.downmix_mono(tensor, ch_dim)
-
-        tensor = torch.rand((10, 1))
-        ch_dim = 1
-
-        jit_out = jit_method(tensor, ch_dim)
-        py_out = F.downmix_mono(tensor, ch_dim)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_downmix_mono(self):
-        tensor = torch.rand((1, 10), device="cuda")
-
-        self._test_script_module(tensor, transforms.DownmixMono)
-
-    def test_torchscript_LC2CL(self):
-        @torch.jit.script
-        def jit_method(tensor):
-            # type: (Tensor) -> Tensor
-            return F.LC2CL(tensor)
-
-        tensor = torch.rand((10, 1))
-
-        jit_out = jit_method(tensor)
-        py_out = F.LC2CL(tensor)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_LC2CL(self):
-        tensor = torch.rand((10, 1), device="cuda")
-
-        self._test_script_module(tensor, transforms.LC2CL)
-
     def test_torchscript_spectrogram(self):
         @torch.jit.script
         def jit_method(sig, pad, window, n_fft, hop, ws, power, normalize):
@@ -167,7 +106,7 @@ def jit_method(spec, multiplier, amin, db_multiplier, top_db):
             # type: (Tensor, float, float, float, Optional[float]) -> Tensor
             return F.spectrogram_to_DB(spec, multiplier, amin, db_multiplier, top_db)
 
-        spec = torch.rand((10, 1))
+        spec = torch.rand((6, 201))
         multiplier = 10.
         amin = 1e-10
         db_multiplier = 0.
@@ -180,7 +119,7 @@ def jit_method(spec, multiplier, amin, db_multiplier, top_db):
 
     @unittest.skipIf(not RUN_CUDA, "no CUDA")
     def test_scriptmodule_SpectrogramToDB(self):
-        spec = torch.rand((10, 1), device="cuda")
+        spec = torch.rand((6, 201), device="cuda")
 
         self._test_script_module(spec, transforms.SpectrogramToDB)
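Note: the two hunks above change the test input for spectrogram_to_DB / SpectrogramToDB from a (10, 1) tensor to a (6, 201) tensor. Below is a hedged sketch of the functional call using exactly the arguments shown in the diff; interpreting the result as a dB-scaled spectrogram is an assumption about the semantics, not something the diff states.

import torch
import torchaudio.functional as F

spec = torch.rand((6, 201))   # shape used by the updated test
multiplier = 10.
amin = 1e-10
db_multiplier = 0.
top_db = None                 # Optional[float], per the type comment in the test
spec_db = F.spectrogram_to_DB(spec, multiplier, amin, db_multiplier, top_db)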

@@ -211,32 +150,13 @@ def test_scriptmodule_MelSpectrogram(self):
 
         self._test_script_module(tensor, transforms.MelSpectrogram)
 
-    def test_torchscript_BLC2CBL(self):
-        @torch.jit.script
-        def jit_method(tensor):
-            # type: (Tensor) -> Tensor
-            return F.BLC2CBL(tensor)
-
-        tensor = torch.rand((10, 1000, 1))
-
-        jit_out = jit_method(tensor)
-        py_out = F.BLC2CBL(tensor)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_BLC2CBL(self):
-        tensor = torch.rand((10, 1000, 1), device="cuda")
-
-        self._test_script_module(tensor, transforms.BLC2CBL)
-
     def test_torchscript_mu_law_encoding(self):
         @torch.jit.script
         def jit_method(tensor, qc):
             # type: (Tensor, int) -> Tensor
             return F.mu_law_encoding(tensor, qc)
 
-        tensor = torch.rand((10, 1))
+        tensor = torch.rand((1, 10))
         qc = 256
 
         jit_out = jit_method(tensor, qc)
@@ -246,7 +166,7 @@ def jit_method(tensor, qc):
 
     @unittest.skipIf(not RUN_CUDA, "no CUDA")
     def test_scriptmodule_MuLawEncoding(self):
-        tensor = torch.rand((10, 1), device="cuda")
+        tensor = torch.rand((1, 10), device="cuda")
 
         self._test_script_module(tensor, transforms.MuLawEncoding)
 
@@ -256,7 +176,7 @@ def jit_method(tensor, qc):
             # type: (Tensor, int) -> Tensor
             return F.mu_law_expanding(tensor, qc)
 
-        tensor = torch.rand((10, 1))
+        tensor = torch.rand((1, 10))
         qc = 256
 
         jit_out = jit_method(tensor, qc)
@@ -266,7 +186,7 @@ def jit_method(tensor, qc):
 
     @unittest.skipIf(not RUN_CUDA, "no CUDA")
     def test_scriptmodule_MuLawExpanding(self):
-        tensor = torch.rand((10, 1), device="cuda")
+        tensor = torch.rand((1, 10), device="cuda")
 
         self._test_script_module(tensor, transforms.MuLawExpanding)
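Note: the mu-law tests above now run on (1, 10) tensors, matching the (channel, time) convention used elsewhere in this commit. A rough round-trip sketch with the signatures shown in the diff follows; the claim that mu_law_expanding approximately inverts mu_law_encoding up to quantization error is an assumption about the functions' semantics.

import torch
import torchaudio.functional as F

waveform = torch.rand((1, 10))            # (channel, time), as in the tests
qc = 256                                  # quantization channels, as in the tests
encoded = F.mu_law_encoding(waveform, qc)
decoded = F.mu_law_expanding(encoded, qc)
print((waveform - decoded).abs().max())   # assumed: small quantization error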
