@@ -30,40 +30,18 @@ def _test_script_module(self, tensor, f, *args):

         self.assertTrue(torch.allclose(jit_out, py_out))

-    def test_torchscript_scale(self):
-        @torch.jit.script
-        def jit_method(tensor, factor):
-            # type: (Tensor, int) -> Tensor
-            return F.scale(tensor, factor)
-
-        tensor = torch.rand((10, 1))
-        factor = 2
-
-        jit_out = jit_method(tensor, factor)
-        py_out = F.scale(tensor, factor)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_scale(self):
-        tensor = torch.rand((10, 1), device="cuda")
-
-        self._test_script_module(tensor, transforms.Scale)
-
     def test_torchscript_pad_trim(self):
         @torch.jit.script
-        def jit_method(tensor, ch_dim, max_len, len_dim, fill_value):
-            # type: (Tensor, int, int, int, float) -> Tensor
-            return F.pad_trim(tensor, ch_dim, max_len, len_dim, fill_value)
+        def jit_method(tensor, max_len, fill_value):
+            # type: (Tensor, int, float) -> Tensor
+            return F.pad_trim(tensor, max_len, fill_value)

-        tensor = torch.rand((10, 1))
-        ch_dim = 1
+        tensor = torch.rand((1, 10))
         max_len = 5
-        len_dim = 0
         fill_value = 3.

-        jit_out = jit_method(tensor, ch_dim, max_len, len_dim, fill_value)
-        py_out = F.pad_trim(tensor, ch_dim, max_len, len_dim, fill_value)
+        jit_out = jit_method(tensor, max_len, fill_value)
+        py_out = F.pad_trim(tensor, max_len, fill_value)

         self.assertTrue(torch.allclose(jit_out, py_out))

@@ -74,45 +52,6 @@ def test_scriptmodule_pad_trim(self):

         self._test_script_module(tensor, transforms.PadTrim, max_len)

-    def test_torchscript_downmix_mono(self):
-        @torch.jit.script
-        def jit_method(tensor, ch_dim):
-            # type: (Tensor, int) -> Tensor
-            return F.downmix_mono(tensor, ch_dim)
-
-        tensor = torch.rand((10, 1))
-        ch_dim = 1
-
-        jit_out = jit_method(tensor, ch_dim)
-        py_out = F.downmix_mono(tensor, ch_dim)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_downmix_mono(self):
-        tensor = torch.rand((1, 10), device="cuda")
-
-        self._test_script_module(tensor, transforms.DownmixMono)
-
-    def test_torchscript_LC2CL(self):
-        @torch.jit.script
-        def jit_method(tensor):
-            # type: (Tensor) -> Tensor
-            return F.LC2CL(tensor)
-
-        tensor = torch.rand((10, 1))
-
-        jit_out = jit_method(tensor)
-        py_out = F.LC2CL(tensor)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_LC2CL(self):
-        tensor = torch.rand((10, 1), device="cuda")
-
-        self._test_script_module(tensor, transforms.LC2CL)
-
     def test_torchscript_spectrogram(self):
         @torch.jit.script
         def jit_method(sig, pad, window, n_fft, hop, ws, power, normalize):
@@ -167,7 +106,7 @@ def jit_method(spec, multiplier, amin, db_multiplier, top_db):
             # type: (Tensor, float, float, float, Optional[float]) -> Tensor
             return F.spectrogram_to_DB(spec, multiplier, amin, db_multiplier, top_db)

-        spec = torch.rand((10, 1))
+        spec = torch.rand((6, 201))
         multiplier = 10.
         amin = 1e-10
         db_multiplier = 0.
@@ -180,7 +119,7 @@ def jit_method(spec, multiplier, amin, db_multiplier, top_db):

     @unittest.skipIf(not RUN_CUDA, "no CUDA")
     def test_scriptmodule_SpectrogramToDB(self):
-        spec = torch.rand((10, 1), device="cuda")
+        spec = torch.rand((6, 201), device="cuda")

         self._test_script_module(spec, transforms.SpectrogramToDB)

@@ -211,32 +150,13 @@ def test_scriptmodule_MelSpectrogram(self):

         self._test_script_module(tensor, transforms.MelSpectrogram)

-    def test_torchscript_BLC2CBL(self):
-        @torch.jit.script
-        def jit_method(tensor):
-            # type: (Tensor) -> Tensor
-            return F.BLC2CBL(tensor)
-
-        tensor = torch.rand((10, 1000, 1))
-
-        jit_out = jit_method(tensor)
-        py_out = F.BLC2CBL(tensor)
-
-        self.assertTrue(torch.allclose(jit_out, py_out))
-
-    @unittest.skipIf(not RUN_CUDA, "no CUDA")
-    def test_scriptmodule_BLC2CBL(self):
-        tensor = torch.rand((10, 1000, 1), device="cuda")
-
-        self._test_script_module(tensor, transforms.BLC2CBL)
-
     def test_torchscript_mu_law_encoding(self):
         @torch.jit.script
         def jit_method(tensor, qc):
             # type: (Tensor, int) -> Tensor
             return F.mu_law_encoding(tensor, qc)

-        tensor = torch.rand((10, 1))
+        tensor = torch.rand((1, 10))
         qc = 256

         jit_out = jit_method(tensor, qc)
@@ -246,7 +166,7 @@ def jit_method(tensor, qc):

     @unittest.skipIf(not RUN_CUDA, "no CUDA")
     def test_scriptmodule_MuLawEncoding(self):
-        tensor = torch.rand((10, 1), device="cuda")
+        tensor = torch.rand((1, 10), device="cuda")

         self._test_script_module(tensor, transforms.MuLawEncoding)

@@ -256,7 +176,7 @@ def jit_method(tensor, qc):
             # type: (Tensor, int) -> Tensor
             return F.mu_law_expanding(tensor, qc)

-        tensor = torch.rand((10, 1))
+        tensor = torch.rand((1, 10))
         qc = 256

         jit_out = jit_method(tensor, qc)
@@ -266,7 +186,7 @@ def jit_method(tensor, qc):

     @unittest.skipIf(not RUN_CUDA, "no CUDA")
     def test_scriptmodule_MuLawExpanding(self):
-        tensor = torch.rand((10, 1), device="cuda")
+        tensor = torch.rand((1, 10), device="cuda")

         self._test_script_module(tensor, transforms.MuLawExpanding)

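Note: every remaining test_scriptmodule_* case above routes its tensor and transform through the _test_script_module helper named in the hunk headers. A minimal sketch of what that helper plausibly does, assuming it instantiates the transform, compiles it with torch.jit.script, and compares scripted output against eager output (the actual helper in this file may differ):

    def _test_script_module(self, tensor, f, *args):
        # Build the transform eagerly, then compile it to TorchScript.
        py_method = f(*args)
        jit_method = torch.jit.script(py_method)

        # Run both versions on the same input and require matching results.
        py_out = py_method(tensor)
        jit_out = jit_method(tensor)

        self.assertTrue(torch.allclose(jit_out, py_out))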