From cea430ad225b627f3e5e511010e9ad80fcfaa991 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Fri, 5 Feb 2021 17:04:50 -0800
Subject: [PATCH 01/14] Bare minimum implementation of core lfilter loop
 without the necessary checks on input

---
 torchaudio/csrc/CMakeLists.txt     |  1 +
 torchaudio/csrc/lfilter.cpp        | 47 ++++++++++++++++++++++++++++++
 torchaudio/functional/filtering.py |  9 ++----
 3 files changed, 50 insertions(+), 7 deletions(-)
 create mode 100644 torchaudio/csrc/lfilter.cpp

diff --git a/torchaudio/csrc/CMakeLists.txt b/torchaudio/csrc/CMakeLists.txt
index 7b920596f5..6a9ea12e18 100644
--- a/torchaudio/csrc/CMakeLists.txt
+++ b/torchaudio/csrc/CMakeLists.txt
@@ -55,6 +55,7 @@ if (BUILD_TORCHAUDIO_PYTHON_EXTENSION)
     SHARED
     pybind.cpp
     sox/legacy.cpp
+    lfilter.cpp
     ${LIBTORCHAUDIO_SOURCES}
     )

diff --git a/torchaudio/csrc/lfilter.cpp b/torchaudio/csrc/lfilter.cpp
new file mode 100644
index 0000000000..a06f8b2fa4
--- /dev/null
+++ b/torchaudio/csrc/lfilter.cpp
@@ -0,0 +1,47 @@
+#include
+#include
+#include
+#include
+
+#include <torch/script.h>
+
+namespace {
+
+int64_t cpu_lfilter_core_loop(
+    const torch::Tensor& input_signal_windows,
+    const torch::Tensor& a_coeff_flipped,
+    torch::Tensor& padded_output_waveform) {
+  //TODO: Implement all checks
+  int64_t n_channel = input_signal_windows.size(0);
+  int64_t n_samples_input = input_signal_windows.size(1);
+  int64_t n_samples_output = padded_output_waveform.size(1);
+  int64_t n_order = a_coeff_flipped.size(0);
+  float* output_data = padded_output_waveform.data_ptr<float>();
+  const float* input_data = input_signal_windows.data_ptr<float>();
+  const float* a_coeff_flipped_data = a_coeff_flipped.data_ptr<float>();
+  for(int64_t i_channel = 0; i_channel < n_channel; i_channel++){
+    for(int64_t i_sample = 0; i_sample < n_samples_input; i_sample++){
+      int64_t offset_input = i_channel*n_samples_input;
+      int64_t offset_output = i_channel*n_samples_output;
+      float a0 = input_data[offset_input+i_sample];
+      for(int64_t i_coeff = 0; i_coeff < n_order; i_coeff++){
+        a0 -= output_data[offset_output+i_sample+i_coeff]*a_coeff_flipped_data[i_coeff];
+      }
+      output_data[offset_output+i_sample+n_order-1] = a0;
+    }
+  }
+  return 0;
+}
+
+} // namespace
+
+TORCH_LIBRARY_IMPL(torchaudio, CPU, m) {
+  m.impl("_lfilter_core_loop", &cpu_lfilter_core_loop);
+}
+
+TORCH_LIBRARY_FRAGMENT(torchaudio, m) {
+  m.def(
+      "_lfilter_core_loop(Tensor input_signal_windows,"
+      "Tensor a_coeff_flipped_flipped,"
+      "Tensor padded_output_waveform) -> int");
+}
\ No newline at end of file
diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index 55cf05a51f..b64b4d2e71 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -877,13 +877,8 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
-    for i_sample, o0 in enumerate(input_signal_windows.t()):
-        windowed_output_signal = padded_output_waveform[
-            :, i_sample:i_sample + n_order
-        ]
-        o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
-        padded_output_waveform[:, i_sample + n_order - 1] = o0
-
+
+    torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
     output = padded_output_waveform[:, n_order - 1:]
 
     if clamp:

From 13043fec0801ec5a2ce1e8fe0b2f2872ec1b68a7 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Fri, 5 Feb 2021 17:42:10 -0800
Subject: [PATCH 02/14] removed unnecessary headers

---
 torchaudio/csrc/lfilter.cpp | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/torchaudio/csrc/lfilter.cpp b/torchaudio/csrc/lfilter.cpp
index a06f8b2fa4..b462a4d4d1 100644
--- a/torchaudio/csrc/lfilter.cpp
+++ b/torchaudio/csrc/lfilter.cpp
@@ -1,8 +1,3 @@
-#include
-#include
-#include
-#include
-
 #include <torch/script.h>
 
 namespace {

From 6d2fbf88ef2de876e011333081e53bc7d870a2f8 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Mon, 8 Feb 2021 08:16:52 -0800
Subject: [PATCH 03/14] Implemented various checks on input

---
 torchaudio/csrc/lfilter.cpp | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/torchaudio/csrc/lfilter.cpp b/torchaudio/csrc/lfilter.cpp
index b462a4d4d1..7e6418129a 100644
--- a/torchaudio/csrc/lfilter.cpp
+++ b/torchaudio/csrc/lfilter.cpp
@@ -6,7 +6,23 @@ int64_t cpu_lfilter_core_loop(
     const torch::Tensor& input_signal_windows,
     const torch::Tensor& a_coeff_flipped,
     torch::Tensor& padded_output_waveform) {
-  //TODO: Implement all checks
+
+  TORCH_CHECK(input_signal_windows.device().is_cpu() &&
+              a_coeff_flipped.device().is_cpu() &&
+              padded_output_waveform.device().is_cpu());
+
+  TORCH_CHECK(input_signal_windows.is_contiguous() &&
+              a_coeff_flipped.is_contiguous() &&
+              padded_output_waveform.is_contiguous());
+
+  TORCH_CHECK(input_signal_windows.dtype().name()=="float" &&
+              a_coeff_flipped.dtype().name()=="float" &&
+              padded_output_waveform.dtype().name()=="float");
+
+  TORCH_CHECK(input_signal_windows.size(0)==padded_output_waveform.size(0));
+
+  TORCH_CHECK(input_signal_windows.size(1)+a_coeff_flipped.size(0)-1==padded_output_waveform.size(1));
+
   int64_t n_channel = input_signal_windows.size(0);
   int64_t n_samples_input = input_signal_windows.size(1);
   int64_t n_samples_output = padded_output_waveform.size(1);

From 0ea4f037570263f3f93aacc23d1661b91a25c580 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Mon, 8 Feb 2021 10:06:26 -0800
Subject: [PATCH 04/14] Calling the existing implementation for float64 types,
 as the C++ implementation only supports float32 at the moment

---
 torchaudio/functional/filtering.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index b64b4d2e71..ee982cc244 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -878,7 +878,16 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
-    torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+    if input_signal_windows.dtype==torch.float64:
+        for i_sample, o0 in enumerate(input_signal_windows.t()):
+            windowed_output_signal = padded_output_waveform[
+                :, i_sample:i_sample + n_order
+            ]
+            o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
+            padded_output_waveform[:, i_sample + n_order - 1] = o0
+    else:
+        torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+
     output = padded_output_waveform[:, n_order - 1:]
 
     if clamp:

From 14f8675c8f0911f9c8ef2a99e7718ad448269fe3 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Mon, 8 Feb 2021 10:29:24 -0800
Subject: [PATCH 05/14] moving lfilter.cpp to LIBTORCHAUDIO_SOURCES

---
 torchaudio/csrc/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchaudio/csrc/CMakeLists.txt b/torchaudio/csrc/CMakeLists.txt
index 6a9ea12e18..f11f92f3eb 100644
--- a/torchaudio/csrc/CMakeLists.txt
+++ b/torchaudio/csrc/CMakeLists.txt
@@ -9,6 +9,7 @@ set(
   sox/utils.cpp
   sox/effects.cpp
   sox/effects_chain.cpp
+  lfilter.cpp
 )
 
 if(BUILD_TRANSDUCER)
@@ -55,7 +56,6 @@ if (BUILD_TORCHAUDIO_PYTHON_EXTENSION)
     SHARED
     pybind.cpp
     sox/legacy.cpp
-    lfilter.cpp
     ${LIBTORCHAUDIO_SOURCES}
     )

From fc9617db4c877729256d0ae3f4712de32fed2d30 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Mon, 8 Feb 2021 16:55:01 -0800
Subject: [PATCH 06/14] templated C++ operator to support both float32 and
 float64 types

---
 torchaudio/csrc/lfilter.cpp        | 79 +++++++++++++++++-------------
 torchaudio/functional/filtering.py | 12 +----
 2 files changed, 47 insertions(+), 44 deletions(-)

diff --git a/torchaudio/csrc/lfilter.cpp b/torchaudio/csrc/lfilter.cpp
index 7e6418129a..2356fdc9f6 100644
--- a/torchaudio/csrc/lfilter.cpp
+++ b/torchaudio/csrc/lfilter.cpp
@@ -2,48 +2,59 @@
 
 namespace {
 
-int64_t cpu_lfilter_core_loop(
+template<typename scalar_t>
+int64_t host_lfilter_core_loop(
     const torch::Tensor& input_signal_windows,
     const torch::Tensor& a_coeff_flipped,
     torch::Tensor& padded_output_waveform) {
-
-  TORCH_CHECK(input_signal_windows.device().is_cpu() &&
-              a_coeff_flipped.device().is_cpu() &&
-              padded_output_waveform.device().is_cpu());
-
-  TORCH_CHECK(input_signal_windows.is_contiguous() &&
-              a_coeff_flipped.is_contiguous() &&
-              padded_output_waveform.is_contiguous());
-
-  TORCH_CHECK(input_signal_windows.dtype().name()=="float" &&
-              a_coeff_flipped.dtype().name()=="float" &&
-              padded_output_waveform.dtype().name()=="float");
-
-  TORCH_CHECK(input_signal_windows.size(0)==padded_output_waveform.size(0));
-
-  TORCH_CHECK(input_signal_windows.size(1)+a_coeff_flipped.size(0)-1==padded_output_waveform.size(1));
-
   int64_t n_channel = input_signal_windows.size(0);
   int64_t n_samples_input = input_signal_windows.size(1);
   int64_t n_samples_output = padded_output_waveform.size(1);
   int64_t n_order = a_coeff_flipped.size(0);
-  float* output_data = padded_output_waveform.data_ptr<float>();
-  const float* input_data = input_signal_windows.data_ptr<float>();
-  const float* a_coeff_flipped_data = a_coeff_flipped.data_ptr<float>();
-  for(int64_t i_channel = 0; i_channel < n_channel; i_channel++){
-    for(int64_t i_sample = 0; i_sample < n_samples_input; i_sample++){
-      int64_t offset_input = i_channel*n_samples_input;
-      int64_t offset_output = i_channel*n_samples_output;
-      float a0 = input_data[offset_input+i_sample];
-      for(int64_t i_coeff = 0; i_coeff < n_order; i_coeff++){
-        a0 -= output_data[offset_output+i_sample+i_coeff]*a_coeff_flipped_data[i_coeff];
-      }
-      output_data[offset_output+i_sample+n_order-1] = a0;
-    }
-  }
+  scalar_t* output_data = padded_output_waveform.data_ptr<scalar_t>();
+  const scalar_t* input_data = input_signal_windows.data_ptr<scalar_t>();
+  const scalar_t* a_coeff_flipped_data = a_coeff_flipped.data_ptr<scalar_t>();
+  for(int64_t i_channel = 0; i_channel < n_channel; i_channel++){
+    for(int64_t i_sample = 0; i_sample < n_samples_input; i_sample++){
+      int64_t offset_input = i_channel*n_samples_input;
+      int64_t offset_output = i_channel*n_samples_output;
+      scalar_t a0 = input_data[offset_input+i_sample];
+      for(int64_t i_coeff = 0; i_coeff < n_order; i_coeff++){
+        a0 -= output_data[offset_output+i_sample+i_coeff]*a_coeff_flipped_data[i_coeff];
+      }
+      output_data[offset_output+i_sample+n_order-1] = a0;
+    }
+  }
   return 0;
 }
 
+int64_t cpu_lfilter_core_loop(
+    const torch::Tensor& input_signal_windows,
+    const torch::Tensor& a_coeff_flipped,
+    torch::Tensor& padded_output_waveform) {
+
+  TORCH_CHECK(input_signal_windows.device().is_cpu() &&
+              a_coeff_flipped.device().is_cpu() &&
+              padded_output_waveform.device().is_cpu());
+
+  TORCH_CHECK(input_signal_windows.is_contiguous() &&
+              a_coeff_flipped.is_contiguous() &&
+              padded_output_waveform.is_contiguous());
+
+  TORCH_CHECK(input_signal_windows.dtype().name()=="float" &&
+              a_coeff_flipped.dtype().name()=="float" &&
+              padded_output_waveform.dtype().name()=="float");
+
+  TORCH_CHECK(input_signal_windows.size(0)==padded_output_waveform.size(0));
+
+  TORCH_CHECK(input_signal_windows.size(1)+a_coeff_flipped.size(0)-1==padded_output_waveform.size(1));
+
+  int64_t output = AT_DISPATCH_FLOATING_TYPES(input_signal_windows.scalar_type(), "lfilter_core_loop", [&] {
+    return host_lfilter_core_loop<scalar_t>(input_signal_windows, a_coeff_flipped, padded_output_waveform);});
+
+  return output;
+}
+
 } // namespace
 
 TORCH_LIBRARY_IMPL(torchaudio, CPU, m) {

diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index ee982cc244..4b0f255096 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -877,16 +877,8 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
-
-    if input_signal_windows.dtype==torch.float64:
-        for i_sample, o0 in enumerate(input_signal_windows.t()):
-            windowed_output_signal = padded_output_waveform[
-                :, i_sample:i_sample + n_order
-            ]
-            o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
-            padded_output_waveform[:, i_sample + n_order - 1] = o0
-    else:
-        torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+
+    torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
 
     output = padded_output_waveform[:, n_order - 1:]

From 7744398c7bf50abaefb2f95d0c79fdf5c17f4441 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Tue, 9 Feb 2021 10:38:09 -0800
Subject: [PATCH 07/14] guarding core loop operator call for CPU device only

---
 torchaudio/functional/filtering.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index 4b0f255096..cf3c9837b7 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -877,8 +877,20 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
+
+    if input_signal_windows.device.type=='cpu' and\
+            a_coeffs_flipped.device.type=='cpu' and\
+            padded_output_waveform.device.type=='cpu':
+        torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+    else:
+        for i_sample, o0 in enumerate(input_signal_windows.t()):
+            windowed_output_signal = padded_output_waveform[
+                :, i_sample:i_sample + n_order
+            ]
+            o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
+            padded_output_waveform[:, i_sample + n_order - 1] = o0
+
-    torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
 
     output = padded_output_waveform[:, n_order - 1:]

From 2f7cc091815b30036b2d8cbbb09602f52e768408 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Tue, 9 Feb 2021 19:35:26 -0800
Subject: [PATCH 08/14] comparing host device directly via device object

---
 torchaudio/functional/filtering.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index cf3c9837b7..d3cd701337 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -878,9 +878,9 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
-    if input_signal_windows.device.type=='cpu' and\
-            a_coeffs_flipped.device.type=='cpu' and\
-            padded_output_waveform.device.type=='cpu':
+    if input_signal_windows.device==torch.device('cpu') and\
+            a_coeffs_flipped.device==torch.device('cpu') and\
+            padded_output_waveform.device==torch.device('cpu'):
         torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
     else:
         for i_sample, o0 in enumerate(input_signal_windows.t()):

From c6b44ed18cc3f97671aaafe73fe290cd0ecfc73f Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Wed, 10 Feb 2021 12:55:20 -0800
Subject: [PATCH 09/14] Registering operator using a "catch-all" kernel, as
 TorchScript fails to script with CPU dispatch-key specific registration

---
 torchaudio/csrc/lfilter.cpp | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)

diff --git a/torchaudio/csrc/lfilter.cpp b/torchaudio/csrc/lfilter.cpp
index 2356fdc9f6..88bce408be 100644
--- a/torchaudio/csrc/lfilter.cpp
+++ b/torchaudio/csrc/lfilter.cpp
@@ -3,7 +3,7 @@
 namespace {
 
 template<typename scalar_t>
-int64_t host_lfilter_core_loop(
+void host_lfilter_core_loop(
     const torch::Tensor& input_signal_windows,
     const torch::Tensor& a_coeff_flipped,
     torch::Tensor& padded_output_waveform) {
@@ -25,10 +25,9 @@ int64_t host_lfilter_core_loop(
       output_data[offset_output+i_sample+n_order-1] = a0;
     }
   }
-  return 0;
 }
 
-int64_t cpu_lfilter_core_loop(
+void cpu_lfilter_core_loop(
     const torch::Tensor& input_signal_windows,
     const torch::Tensor& a_coeff_flipped,
     torch::Tensor& padded_output_waveform) {
@@ -49,21 +48,12 @@ int64_t cpu_lfilter_core_loop(
 
   TORCH_CHECK(input_signal_windows.size(1)+a_coeff_flipped.size(0)-1==padded_output_waveform.size(1));
 
-  int64_t output = AT_DISPATCH_FLOATING_TYPES(input_signal_windows.scalar_type(), "lfilter_core_loop", [&] {
-    return host_lfilter_core_loop<scalar_t>(input_signal_windows, a_coeff_flipped, padded_output_waveform);});
-
-  return output;
+  AT_DISPATCH_FLOATING_TYPES(input_signal_windows.scalar_type(), "lfilter_core_loop", [&] {
+    host_lfilter_core_loop<scalar_t>(input_signal_windows, a_coeff_flipped, padded_output_waveform);});
 }
 
 } // namespace
 
-TORCH_LIBRARY_IMPL(torchaudio, CPU, m) {
-  m.impl("_lfilter_core_loop", &cpu_lfilter_core_loop);
-}
-
 TORCH_LIBRARY_FRAGMENT(torchaudio, m) {
-  m.def(
-      "_lfilter_core_loop(Tensor input_signal_windows,"
-      "Tensor a_coeff_flipped_flipped,"
-      "Tensor padded_output_waveform) -> int");
-}
\ No newline at end of file
+  m.def("torchaudio::_lfilter_core_loop", &cpu_lfilter_core_loop);
+}

From 2d693a67eca00b665678826ff3851a7e24dbc691 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Wed, 10 Feb 2021 13:40:09 -0800
Subject: [PATCH 10/14] fixed linter issues in filtering.py and added a comment
 on the registration binding code

---
 torchaudio/csrc/lfilter.cpp        | 2 ++
 torchaudio/functional/filtering.py | 8 +++-----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/torchaudio/csrc/lfilter.cpp b/torchaudio/csrc/lfilter.cpp
index 88bce408be..33667738bd 100644
--- a/torchaudio/csrc/lfilter.cpp
+++ b/torchaudio/csrc/lfilter.cpp
@@ -54,6 +54,8 @@ void cpu_lfilter_core_loop(
 
 } // namespace
 
+// Note: We want to avoid using "catch-all" kernel.
+// The following registration should be replaced with CPU specific registration.
 TORCH_LIBRARY_FRAGMENT(torchaudio, m) {
   m.def("torchaudio::_lfilter_core_loop", &cpu_lfilter_core_loop);
 }
diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index d3cd701337..1e4b32258b 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -878,9 +878,9 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
-    if input_signal_windows.device==torch.device('cpu') and\
-            a_coeffs_flipped.device==torch.device('cpu') and\
-            padded_output_waveform.device==torch.device('cpu'):
+    if input_signal_windows.device == torch.device('cpu') and\
+            a_coeffs_flipped.device == torch.device('cpu') and\
+            padded_output_waveform.device == torch.device('cpu'):
         torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
     else:
         for i_sample, o0 in enumerate(input_signal_windows.t()):
@@ -890,8 +890,6 @@ def lfilter(
             o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
             padded_output_waveform[:, i_sample + n_order - 1] = o0
 
-
-
     output = padded_output_waveform[:, n_order - 1:]
 
     if clamp:

From 08aae5e5fbaa7d5d2fd3df3724b06d08672fd637 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Thu, 11 Feb 2021 08:16:47 -0800
Subject: [PATCH 11/14] guarding operator call based on OS, as C++ extensions
 are not available on 'nt', and resolving linter issues

---
 torchaudio/csrc/lfilter.cpp        | 74 +++++++++++++++++-------------
 torchaudio/functional/filtering.py |  4 +-
 2 files changed, 45 insertions(+), 33 deletions(-)

diff --git a/torchaudio/csrc/lfilter.cpp b/torchaudio/csrc/lfilter.cpp
index 33667738bd..af9425fd15 100644
--- a/torchaudio/csrc/lfilter.cpp
+++ b/torchaudio/csrc/lfilter.cpp
@@ -2,11 +2,11 @@
 
 namespace {
 
-template<typename scalar_t>
+template <typename scalar_t>
 void host_lfilter_core_loop(
-    const torch::Tensor& input_signal_windows,
-    const torch::Tensor& a_coeff_flipped,
-    torch::Tensor& padded_output_waveform) {
+    const torch::Tensor& input_signal_windows,
+    const torch::Tensor& a_coeff_flipped,
+    torch::Tensor& padded_output_waveform) {
   int64_t n_channel = input_signal_windows.size(0);
   int64_t n_samples_input = input_signal_windows.size(1);
   int64_t n_samples_output = padded_output_waveform.size(1);
@@ -14,48 +14,58 @@ void host_lfilter_core_loop(
   scalar_t* output_data = padded_output_waveform.data_ptr<scalar_t>();
   const scalar_t* input_data = input_signal_windows.data_ptr<scalar_t>();
   const scalar_t* a_coeff_flipped_data = a_coeff_flipped.data_ptr<scalar_t>();
-  for(int64_t i_channel = 0; i_channel < n_channel; i_channel++){
-    for(int64_t i_sample = 0; i_sample < n_samples_input; i_sample++){
-      int64_t offset_input = i_channel*n_samples_input;
-      int64_t offset_output = i_channel*n_samples_output;
-      scalar_t a0 = input_data[offset_input+i_sample];
-      for(int64_t i_coeff = 0; i_coeff < n_order; i_coeff++){
-        a0 -= output_data[offset_output+i_sample+i_coeff]*a_coeff_flipped_data[i_coeff];
-      }
-      output_data[offset_output+i_sample+n_order-1] = a0;
-    }
-  }
+  for (int64_t i_channel = 0; i_channel < n_channel; ++i_channel) {
+    for (int64_t i_sample = 0; i_sample < n_samples_input; ++i_sample) {
+      int64_t offset_input = i_channel * n_samples_input;
+      int64_t offset_output = i_channel * n_samples_output;
+      scalar_t a0 = input_data[offset_input + i_sample];
+      for (int64_t i_coeff = 0; i_coeff < n_order; ++i_coeff) {
+        a0 -= output_data[offset_output + i_sample + i_coeff] *
+            a_coeff_flipped_data[i_coeff];
+      }
+      output_data[offset_output + i_sample + n_order - 1] = a0;
+    }
+  }
 }
 
 void cpu_lfilter_core_loop(
     const torch::Tensor& input_signal_windows,
     const torch::Tensor& a_coeff_flipped,
     torch::Tensor& padded_output_waveform) {
-  TORCH_CHECK(input_signal_windows.device().is_cpu() &&
-              a_coeff_flipped.device().is_cpu() &&
-              padded_output_waveform.device().is_cpu());
+  TORCH_CHECK(
+      input_signal_windows.device().is_cpu() &&
+      a_coeff_flipped.device().is_cpu() &&
+      padded_output_waveform.device().is_cpu());
 
-  TORCH_CHECK(input_signal_windows.is_contiguous() &&
-              a_coeff_flipped.is_contiguous() &&
-              padded_output_waveform.is_contiguous());
+  TORCH_CHECK(
+      input_signal_windows.is_contiguous() &&
+      a_coeff_flipped.is_contiguous() &&
+      padded_output_waveform.is_contiguous());
 
-  TORCH_CHECK(input_signal_windows.dtype().name()=="float" &&
-              a_coeff_flipped.dtype().name()=="float" &&
-              padded_output_waveform.dtype().name()=="float");
+  TORCH_CHECK(
+      input_signal_windows.dtype().name() == "float" &&
+      a_coeff_flipped.dtype().name() == "float" &&
+      padded_output_waveform.dtype().name() == "float");
 
-  TORCH_CHECK(input_signal_windows.size(0)==padded_output_waveform.size(0));
+  TORCH_CHECK(input_signal_windows.size(0) == padded_output_waveform.size(0));
 
-  TORCH_CHECK(input_signal_windows.size(1)+a_coeff_flipped.size(0)-1==padded_output_waveform.size(1));
+  TORCH_CHECK(
+      input_signal_windows.size(1) + a_coeff_flipped.size(0) - 1 ==
+      padded_output_waveform.size(1));
 
-  AT_DISPATCH_FLOATING_TYPES(input_signal_windows.scalar_type(), "lfilter_core_loop", [&] {
-    host_lfilter_core_loop<scalar_t>(input_signal_windows, a_coeff_flipped, padded_output_waveform);});
+  AT_DISPATCH_FLOATING_TYPES(
+      input_signal_windows.scalar_type(), "lfilter_core_loop", [&] {
+        host_lfilter_core_loop<scalar_t>(
+            input_signal_windows, a_coeff_flipped, padded_output_waveform);
+      });
 }
-
+
 } // namespace
 
-// Note: We want to avoid using "catch-all" kernel.
-// The following registration should be replaced with CPU specific registration.
+// Note: We want to avoid using "catch-all" kernel.
+// The following registration should be replaced with CPU specific registration.
 TORCH_LIBRARY_FRAGMENT(torchaudio, m) {
   m.def("torchaudio::_lfilter_core_loop", &cpu_lfilter_core_loop);
 }
diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index 1e4b32258b..78cd7a4823 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -1,4 +1,5 @@
 import math
+import os
 from typing import Optional
 
 import torch
@@ -879,7 +879,8 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
-    if input_signal_windows.device == torch.device('cpu') and\
+    if os.name == 'posix' and\
+            input_signal_windows.device == torch.device('cpu') and\
             a_coeffs_flipped.device == torch.device('cpu') and\
             padded_output_waveform.device == torch.device('cpu'):
         torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)

From 42bcb115a170b56b05e9667c40c0bb9b6f0033e1 Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Thu, 11 Feb 2021 13:48:56 -0800
Subject: [PATCH 12/14] implementing a fallback solution for when the operator
 is not available

---
 torchaudio/functional/filtering.py | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index 78cd7a4823..4e7d41c900 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -7,6 +7,20 @@
 import torchaudio._internal.fft
 
 
+try:
+    lfilter_core_loop = torch.ops.torchaudio._lfilter_core_loop
+except RuntimeError as err:
+    assert str(err) == 'No such operator torchaudio::_lfilter_core_loop'
+
+    def lfilter_core_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):
+        n_order = a_coeffs_flipped.size(0)
+        for i_sample, o0 in enumerate(input_signal_windows.t()):
+            windowed_output_signal = padded_output_waveform[
+                :, i_sample:i_sample + n_order
+            ]
+            o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
+            padded_output_waveform[:, i_sample + n_order - 1] = o0
+
 
 def _dB2Linear(x: float) -> float:
     return math.exp(x * math.log(10) / 20.0)
@@ -879,11 +893,10 @@ def lfilter(
     input_signal_windows.div_(a_coeffs[0])
     a_coeffs_flipped.div_(a_coeffs[0])
 
-    if os.name == 'posix' and\
-            input_signal_windows.device == torch.device('cpu') and\
+    if input_signal_windows.device == torch.device('cpu') and\
             a_coeffs_flipped.device == torch.device('cpu') and\
             padded_output_waveform.device == torch.device('cpu'):
-        torch.ops.torchaudio._lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+        lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
     else:
         for i_sample, o0 in enumerate(input_signal_windows.t()):
             windowed_output_signal = padded_output_waveform[

From fab80f346fff66c06dcf4aba0b2bae895b451c0a Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Fri, 12 Feb 2021 06:45:38 -0800
Subject: [PATCH 13/14] avoiding code repetition of lfilter_core_loop

---
 torchaudio/functional/filtering.py | 32 ++++++++++++++----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index 4e7d41c900..5261abb703 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -7,19 +7,22 @@
 import torchaudio._internal.fft
 
 
+def lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):
+    n_order = a_coeffs_flipped.size(0)
+    for i_sample, o0 in enumerate(input_signal_windows.t()):
+        windowed_output_signal = padded_output_waveform[
+            :, i_sample:i_sample + n_order
+        ]
+        o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
+        padded_output_waveform[:, i_sample + n_order - 1] = o0
+
+
 try:
-    lfilter_core_loop = torch.ops.torchaudio._lfilter_core_loop
+    lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop
 except RuntimeError as err:
     assert str(err) == 'No such operator torchaudio::_lfilter_core_loop'
-
-    def lfilter_core_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):
-        n_order = a_coeffs_flipped.size(0)
-        for i_sample, o0 in enumerate(input_signal_windows.t()):
-            windowed_output_signal = padded_output_waveform[
-                :, i_sample:i_sample + n_order
-            ]
-            o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
-            padded_output_waveform[:, i_sample + n_order - 1] = o0
+    lfilter_core_cpu_loop = lfilter_core_generic_loop
 
 
 def _dB2Linear(x: float) -> float:
@@ -896,14 +899,9 @@ def lfilter(
     if input_signal_windows.device == torch.device('cpu') and\
             a_coeffs_flipped.device == torch.device('cpu') and\
             padded_output_waveform.device == torch.device('cpu'):
-        lfilter_core_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+        lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
     else:
-        for i_sample, o0 in enumerate(input_signal_windows.t()):
-            windowed_output_signal = padded_output_waveform[
-                :, i_sample:i_sample + n_order
-            ]
-            o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
-            padded_output_waveform[:, i_sample + n_order - 1] = o0
+        lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
 
     output = padded_output_waveform[:, n_order - 1:]

From 3ee854cdf5e54d34a4a99814e293e88aa368110e Mon Sep 17 00:00:00 2001
From: Parmeet Bhatia
Date: Fri, 12 Feb 2021 10:25:45 -0800
Subject: [PATCH 14/14] fixing style-related issues

---
 torchaudio/functional/filtering.py | 39 +++++++++++++++---------------
 1 file changed, 19 insertions(+), 20 deletions(-)

diff --git a/torchaudio/functional/filtering.py b/torchaudio/functional/filtering.py
index 5261abb703..b37100d15e 100644
--- a/torchaudio/functional/filtering.py
+++ b/torchaudio/functional/filtering.py
@@ -1,5 +1,4 @@
 import math
-import os
 from typing import Optional
 
 import torch
@@ -8,23 +7,6 @@
 import torchaudio._internal.fft
 
 
-def lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):
-    n_order = a_coeffs_flipped.size(0)
-    for i_sample, o0 in enumerate(input_signal_windows.t()):
-        windowed_output_signal = padded_output_waveform[
-            :, i_sample:i_sample + n_order
-        ]
-        o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
-        padded_output_waveform[:, i_sample + n_order - 1] = o0
-
-
-try:
-    lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop
-except RuntimeError as err:
-    assert str(err) == 'No such operator torchaudio::_lfilter_core_loop'
-    lfilter_core_cpu_loop = lfilter_core_generic_loop
-
-
 def _dB2Linear(x: float) -> float:
     return math.exp(x * math.log(10) / 20.0)
 
@@ -826,6 +808,23 @@ def highpass_biquad(
     return biquad(waveform, b0, b1, b2, a0, a1, a2)
 
 
+def _lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):
+    n_order = a_coeffs_flipped.size(0)
+    for i_sample, o0 in enumerate(input_signal_windows.t()):
+        windowed_output_signal = padded_output_waveform[
+            :, i_sample:i_sample + n_order
+        ]
+        o0.addmv_(windowed_output_signal, a_coeffs_flipped, alpha=-1)
+        padded_output_waveform[:, i_sample + n_order - 1] = o0
+
+
+try:
+    _lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop
+except RuntimeError as err:
+    assert str(err) == 'No such operator torchaudio::_lfilter_core_loop'
+    _lfilter_core_cpu_loop = _lfilter_core_generic_loop
+
+
 def lfilter(
     waveform: Tensor,
     a_coeffs: Tensor,
@@ -899,9 +898,9 @@ def lfilter(
     if input_signal_windows.device == torch.device('cpu') and\
             a_coeffs_flipped.device == torch.device('cpu') and\
             padded_output_waveform.device == torch.device('cpu'):
-        lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+        _lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
     else:
-        lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+        _lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
 
     output = padded_output_waveform[:, n_order - 1:]
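
For reference, the recursion that both the C++ kernel (host_lfilter_core_loop) and the Python fallback (_lfilter_core_generic_loop) implement can be sketched in plain PyTorch as below. This is an illustrative restatement for readers of the patch series, not part of the patches; the function name and the explicit per-channel loop are illustrative choices, and it assumes padded_output_waveform arrives zero-initialized with n_order - 1 padding columns and a_coeffs_flipped already normalized by a_coeffs[0], as lfilter sets them up.

    import torch

    def lfilter_core_reference_loop(input_signal_windows: torch.Tensor,
                                    a_coeffs_flipped: torch.Tensor,
                                    padded_output_waveform: torch.Tensor) -> None:
        # For every output sample: subtract the dot product of the flipped,
        # normalized denominator coefficients with the previous n_order output
        # samples, then write the result in place (same recursion as the C++ loop).
        n_channel, n_samples_input = input_signal_windows.shape
        n_order = a_coeffs_flipped.size(0)
        for i_channel in range(n_channel):
            for i_sample in range(n_samples_input):
                window = padded_output_waveform[i_channel, i_sample:i_sample + n_order]
                a0 = input_signal_windows[i_channel, i_sample] - torch.dot(window, a_coeffs_flipped)
                padded_output_waveform[i_channel, i_sample + n_order - 1] = a0

On a contiguous CPU float32 input this should agree with torch.ops.torchaudio._lfilter_core_loop (when the C++ extension is built) up to floating-point rounding, which makes it a convenient cross-check for the registered operator.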