diff --git a/core/conversion/converters/impl/unary.cpp b/core/conversion/converters/impl/unary.cpp
index 4d8e7d1ba1..a1d03a359d 100644
--- a/core/conversion/converters/impl/unary.cpp
+++ b/core/conversion/converters/impl/unary.cpp
@@ -1,6 +1,8 @@
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
 
+#include "torch/torch.h"
+
 namespace torch_tensorrt {
 namespace core {
 namespace conversion {
@@ -8,6 +10,46 @@ namespace converters {
 namespace impl {
 namespace {
 
+
+auto abs_registration TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(
+    {"aten::abs(Tensor self) -> Tensor",
+     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+       auto in = args[0].ITensor();
+       bool unary_supported_input = in->getType() == nvinfer1::DataType::kFLOAT
+           || in->getType() == nvinfer1::DataType::kHALF
+           || in->getType() == nvinfer1::DataType::kINT8;
+       if (unary_supported_input) {
+         auto unary_layer = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kABS);
+         TORCHTRT_CHECK(unary_layer, "Unable to create abs layer from node: " << *n);
+         unary_layer->setName(util::node_info(n).c_str());
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], unary_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       }
+       else {
+         // For types not supported by kABS, use an elementwise implementation abs(x) = max(x, -1 * x)
+         at::Tensor neg_one = torch::full({1}, -1).to(util::TRTDataTypeToScalarType(in->getType()));
+         auto neg_one_const = tensor_to_const(ctx, neg_one);
+         auto neg_layer = add_elementwise(
+             ctx,
+             nvinfer1::ElementWiseOperation::kPROD,
+             in,
+             neg_one_const,
+             util::node_info(n) + std::string("_Negation"));
+         TORCHTRT_CHECK(neg_layer, "Unable to create prod layer from node: " << *n);
+         auto max_layer = add_elementwise(
+             ctx,
+             nvinfer1::ElementWiseOperation::kMAX,
+             in,
+             neg_layer->getOutput(0),
+             util::node_info(n) + std::string("_Max"));
+         TORCHTRT_CHECK(max_layer, "Unable to create max layer from node: " << *n);
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], max_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       }
+     }});
+
 #define convert(unary, trt_type)                                                         \
   auto unary##_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern( \
       {"aten::" #unary "(Tensor self) -> Tensor",                                        \
@@ -32,7 +74,6 @@ convert(asin, kASIN);
 convert(sinh, kSINH);
 convert(tan, kTAN);
 convert(atan, kATAN);
-convert(abs, kABS);
 convert(floor, kFLOOR);
 convert(reciprocal, kRECIP);
 convert(log, kLOG);
diff --git a/tests/core/conversion/converters/test_unary.cpp b/tests/core/conversion/converters/test_unary.cpp
index 9ed56ed0f0..a7ab3bb21d 100644
--- a/tests/core/conversion/converters/test_unary.cpp
+++ b/tests/core/conversion/converters/test_unary.cpp
@@ -1,4 +1,5 @@
 #include <string>
+#include "torch/torch.h"
 #include "core/compiler.h"
 #include "gtest/gtest.h"
 #include "tests/util/util.h"
@@ -14,6 +15,22 @@ std::string gen_test_graph(const std::string& unary) {
 }
 } // namespace
 
+TEST(Converters, ATenAbsIntConvertsCorrectly) {
+  const auto graph = gen_test_graph("abs");
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::tensor({-1, 1, -2, 2, -3, 3}, {at::kCUDA}).to(torch::kInt32);
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0]));
+}
+
 #define test_unary(unary, name)                                           \
   TEST(Converters, ATen##name##ConvertsCorrectly) {                       \
     const auto graph = gen_test_graph(#unary);                            \
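
Note: the fallback branch lowers abs(x) to the identity abs(x) = max(x, -1 * x), built from a kPROD and a kMAX elementwise layer, because TensorRT's unary kABS layer only accepts kFLOAT, kHALF, and kINT8 inputs. Below is a minimal standalone sketch (not part of the patch; the file name and test values are illustrative, mirroring the new ATenAbsIntConvertsCorrectly test) that checks the identity for int32 on the CPU:

// abs_identity_check.cpp -- hypothetical standalone check, not in the PR.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (int32_t x : {-1, 1, -2, 2, -3, 3, 0}) {
    // Mirrors the converter's lowering: multiply by -1 (kPROD), then take
    // the elementwise maximum with the original input (kMAX).
    int32_t abs_via_max = std::max(x, -1 * x);
    assert(abs_via_max == (x < 0 ? -x : x));
  }
  return 0;
}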