From cbe374e6b685f7b0834e20c8ba1b8070574de175 Mon Sep 17 00:00:00 2001
From: Michael Feliz
Date: Thu, 25 Aug 2022 15:10:45 -0700
Subject: [PATCH 1/3] [feat] Add support for argmax and argmin

Adds support for aten::argmax and aten::argmin.

Fixes # (issue)

- New feature (non-breaking change which adds functionality)

- [ ] My code follows the style guidelines of this project (You can use the linters)
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas and hacks
- [ ] I have made corresponding changes to the documentation
- [ ] I have added tests to verify my fix or my feature
- [ ] New and existing unit tests pass locally with my changes
- [ ] I have added the relevant labels to my PR so that relevant reviewers are notified
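Under the hood, both ops map onto a single TensorRT TopK layer with k=1, reading
back the indices output. A minimal standalone sketch of the idea (`net`, `in`,
and `d` are hypothetical stand-ins for the network, input tensor, and reduce
dimension; illustration only, not part of the patch):

    // TensorRT's addTopK reduces over a bitmask of axes; select exactly one axis.
    uint32_t reduce_axes_mask = 1u << d;
    // k=1 TopK: output 0 carries the max values, output 1 the argmax indices.
    auto* topk_layer = net->addTopK(*in, nvinfer1::TopKOperation::kMAX, 1, reduce_axes_mask);
    nvinfer1::ITensor* argmax_indices = topk_layer->getOutput(1);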
shape: " << out0->getDimensions()); - LOG_DEBUG("Output tensor(1) shape: " << out1->getDimensions()); - - return true; - }}); + +bool min_max_dim(ConversionCtx* ctx, const torch::jit::Node* n, args& args, nvinfer1::TopKOperation topKOperation) { + auto self = args[0].ITensorOrFreeze(ctx); + auto dim = args[1].unwrapToInt(); + auto keep_dims = args[2].unwrapToBool(); + auto selfDim = util::toVec(self->getDimensions()); + if (dim < 0) { + dim = selfDim.size() + dim; + } + uint32_t reduce_axes_mask = 1 << dim; + auto topk_layer = ctx->net->addTopK(*self, topKOperation, 1, reduce_axes_mask); + TORCHTRT_CHECK(topk_layer, "Unable to create topk layer from node: " << *n); + auto topk_dims = util::toVec(topk_layer->getOutput(0)->getDimensions()); + + nvinfer1::ITensor* out0 = nullptr; + nvinfer1::ITensor* out1 = nullptr; + if (!keep_dims) { + TORCHTRT_CHECK(topk_dims[dim] == 1, "Unexpected size in squeeze dimension. Expected: 1 Actual: " << topk_dims[dim]); + auto squeeze_layer = ctx->net->addShuffle(*topk_layer->getOutput(0)); + squeeze_layer->setReshapeDimensions(util::squeezeDims(topk_layer->getOutput(0)->getDimensions(), dim)); + TORCHTRT_CHECK(squeeze_layer, "Unable to create squeeze_layer layer from node: " << *n); + out0 = ctx->AssociateValueAndTensor(n->outputs()[0], squeeze_layer->getOutput(0)); + + auto squeeze_layer_indices = ctx->net->addShuffle(*topk_layer->getOutput(1)); + squeeze_layer_indices->setReshapeDimensions(util::squeezeDims(topk_layer->getOutput(1)->getDimensions(), dim)); + TORCHTRT_CHECK(squeeze_layer_indices, "Unable to create squeeze_layer_indices layer from node: " << *n); + out1 = ctx->AssociateValueAndTensor(n->outputs()[1], squeeze_layer_indices->getOutput(0)); + } else { + out0 = ctx->AssociateValueAndTensor(n->outputs()[0], topk_layer->getOutput(0)); + out1 = ctx->AssociateValueAndTensor(n->outputs()[1], topk_layer->getOutput(1)); + } + + LOG_DEBUG("Output tensor(0) shape: " << out0->getDimensions()); + LOG_DEBUG("Output tensor(1) shape: " << out1->getDimensions()); + + return true; +} + +bool arg_min_max(ConversionCtx* ctx, const torch::jit::Node* n, args& args, nvinfer1::TopKOperation topKOperation) { + auto self = args[0].ITensorOrFreeze(ctx); + auto dim = args[1].unwrapToInt(); + auto keep_dims = args[2].unwrapToBool(); + auto selfDim = util::toVec(self->getDimensions()); + if (dim < 0) { + dim = selfDim.size() + dim; + } + uint32_t reduce_axes_mask = 1 << dim; + auto topk_layer = ctx->net->addTopK(*self, topKOperation, 1, reduce_axes_mask); + TORCHTRT_CHECK(topk_layer, "Unable to create topk layer from node: " << *n); + auto topk_dims = util::toVec(topk_layer->getOutput(0)->getDimensions()); + + nvinfer1::ITensor* out = nullptr; + if (!keep_dims) { + TORCHTRT_CHECK(topk_dims[dim] == 1, "Unexpected size in squeeze dimension. 
Expected: 1 Actual: " << topk_dims[dim]); + auto squeeze_layer_indices = ctx->net->addShuffle(*topk_layer->getOutput(1)); + squeeze_layer_indices->setReshapeDimensions(util::squeezeDims(topk_layer->getOutput(1)->getDimensions(), dim)); + TORCHTRT_CHECK(squeeze_layer_indices, "Unable to create squeeze_layer_indices layer from node: " << *n); + out = ctx->AssociateValueAndTensor(n->outputs()[0], squeeze_layer_indices->getOutput(0)); + } else { + out = ctx->AssociateValueAndTensor(n->outputs()[0], topk_layer->getOutput(1)); + } + + LOG_DEBUG("Output tensor shape: " << out->getDimensions()); + + return true; +} + +auto max_registrations TORCHTRT_UNUSED = + RegisterNodeConversionPatterns() + .pattern( + {"aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return min_max_dim(ctx, n, args, nvinfer1::TopKOperation::kMAX); + }}) + .pattern( + {"aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return min_max_dim(ctx, n, args, nvinfer1::TopKOperation::kMIN); + }}) + .pattern( + {"aten::argmax(Tensor self, int dim, bool keepdim=False) -> (Tensor)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return arg_min_max(ctx, n, args, nvinfer1::TopKOperation::kMAX); + }}) + .pattern( + {"aten::argmin(Tensor self, int dim, bool keepdim=False) -> (Tensor)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return arg_min_max(ctx, n, args, nvinfer1::TopKOperation::kMIN); + }}); } // namespace } // namespace impl } // namespace converters diff --git a/tests/core/conversion/converters/test_topk.cpp b/tests/core/conversion/converters/test_topk.cpp index 1885493737..001c6ee955 100644 --- a/tests/core/conversion/converters/test_topk.cpp +++ b/tests/core/conversion/converters/test_topk.cpp @@ -1,57 +1,174 @@ -#include -#include "core/compiler.h" -#include "gtest/gtest.h" -#include "tests/util/util.h" -#include "torch/csrc/jit/ir/irparser.h" - -TEST(Converters, ATenTopKConvertsCorrectly) { - const auto graph = R"IR( - graph(%0 : Tensor): - %1 : int = prim::Constant[value=20]() - %2 : int = prim::Constant[value=-1]() - %3 : bool = prim::Constant[value=1]() - %4 : bool = prim::Constant[value=1]() - %5 : Tensor, %6 : Tensor = aten::topk(%0, %1, %2, %3, %4) - return (%5, %6))IR"; - - auto g = std::make_shared(); - torch::jit::parseIR(graph, g.get()); - - auto in = at::rand({10, 10, 100}, {at::kCUDA}); - - auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); - auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); - - params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); - auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); - - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6)); - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6)); -} - -TEST(Converters, ATenMaxDimConvertsCorrectly) { - const auto graph = R"IR( - graph(%x.1 : Tensor): - %2 : int = prim::Constant[value=0]() - %3 : bool = prim::Constant[value=0]() - %4 : Tensor, %5 : Tensor = aten::max(%x.1, %2, %3) - return (%4, %5))IR"; - - auto g = std::make_shared(); - torch::jit::parseIR(graph, g.get()); - - auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); - 
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
-}
+#include <string>
+#include "core/compiler.h"
+#include "gtest/gtest.h"
+#include "tests/util/util.h"
+#include "torch/csrc/jit/ir/irparser.h"
+
+TEST(Converters, ATenTopKConvertsCorrectly) {
+  const auto graph = R"IR(
+        graph(%0 : Tensor):
+            %1 : int = prim::Constant[value=20]()
+            %2 : int = prim::Constant[value=-1]()
+            %3 : bool = prim::Constant[value=1]()
+            %4 : bool = prim::Constant[value=1]()
+            %5 : Tensor, %6 : Tensor = aten::topk(%0, %1, %2, %3, %4)
+            return (%5, %6))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({10, 10, 100}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
+}
+
+TEST(Converters, ATenMaxDimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor, %5 : Tensor = aten::max(%x.1, %2, %3)
+            return (%4, %5))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
+}
+
+TEST(Converters, ATenMinDimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor, %5 : Tensor = aten::min(%x.1, %2, %3)
+            return (%4, %5))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
+}
+
+TEST(Converters, ATenArgMaxConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor = aten::argmax(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
+
+TEST(Converters, ATenArgMaxKeepdimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=1]()
+            %3 : bool = prim::Constant[value=1]()
+            %4 : Tensor = aten::argmax(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
+
+TEST(Converters, ATenArgMinConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor = aten::argmin(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
+
+TEST(Converters, ATenArgMinKeepdimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=1]()
+            %3 : bool = prim::Constant[value=1]()
+            %4 : Tensor = aten::argmin(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
From 4404280dc32ca7eb6a86bc2053c3426e5ea1fcac Mon Sep 17 00:00:00 2001
From: Michael Feliz
Date: Wed, 31 Aug 2022 11:03:59 -0700
Subject: [PATCH 2/3] move max.cpp tests to test_max.cpp

no functional change
---
 tests/core/conversion/converters/BUILD        |   5 +
 tests/core/conversion/converters/test_max.cpp | 147 +++++++++++++
 .../core/conversion/converters/test_topk.cpp  | 206 +++----------
 3 files changed, 184 insertions(+), 174 deletions(-)
 create mode 100644 tests/core/conversion/converters/test_max.cpp

diff --git a/tests/core/conversion/converters/BUILD b/tests/core/conversion/converters/BUILD
index 82bc2f7033..5246de4cf1 100644
--- a/tests/core/conversion/converters/BUILD
+++ b/tests/core/conversion/converters/BUILD
@@ -71,6 +71,10 @@ converter_test(
     name = "test_matrix_multiply",
 )
 
+converter_test(
+    name = "test_max",
+)
+
 converter_test(
     name = "test_normalize",
 )
@@ -156,6 +160,7 @@ test_suite(
         ":test_linear",
         ":test_lstm_cell",
         ":test_matrix_multiply",
+        ":test_max",
        ":test_normalize",
         ":test_pooling",
         ":test_reduce",
diff --git a/tests/core/conversion/converters/test_max.cpp b/tests/core/conversion/converters/test_max.cpp
new file mode 100644
index 0000000000..dfc2432c24
--- /dev/null
+++ b/tests/core/conversion/converters/test_max.cpp
@@ -0,0 +1,147 @@
+#include <string>
+#include "core/compiler.h"
+#include "gtest/gtest.h"
+#include "tests/util/util.h"
+#include "torch/csrc/jit/ir/irparser.h"
+
+TEST(Converters, ATenMaxDimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor, %5 : Tensor = aten::max(%x.1, %2, %3)
+            return (%4, %5))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
+}
+
+TEST(Converters, ATenMinDimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor, %5 : Tensor = aten::min(%x.1, %2, %3)
+            return (%4, %5))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
+}
+
+TEST(Converters, ATenArgMaxConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor = aten::argmax(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
+
+TEST(Converters, ATenArgMaxKeepdimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=1]()
+            %3 : bool = prim::Constant[value=1]()
+            %4 : Tensor = aten::argmax(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
+
+TEST(Converters, ATenArgMinConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=0]()
+            %3 : bool = prim::Constant[value=0]()
+            %4 : Tensor = aten::argmin(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
+
+TEST(Converters, ATenArgMinKeepdimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+            %2 : int = prim::Constant[value=1]()
+            %3 : bool = prim::Constant[value=1]()
+            %4 : Tensor = aten::argmin(%x.1, %2, %3)
+            return (%4))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+}
diff --git a/tests/core/conversion/converters/test_topk.cpp b/tests/core/conversion/converters/test_topk.cpp
index 001c6ee955..c53d209c1f 100644
--- a/tests/core/conversion/converters/test_topk.cpp
+++ b/tests/core/conversion/converters/test_topk.cpp
@@ -1,174 +1,32 @@
-#include <string>
-#include "core/compiler.h"
-#include "gtest/gtest.h"
-#include "tests/util/util.h"
-#include "torch/csrc/jit/ir/irparser.h"
-
-TEST(Converters, ATenTopKConvertsCorrectly) {
-  const auto graph = R"IR(
-        graph(%0 : Tensor):
-            %1 : int = prim::Constant[value=20]()
-            %2 : int = prim::Constant[value=-1]()
-            %3 : bool = prim::Constant[value=1]()
-            %4 : bool = prim::Constant[value=1]()
-            %5 : Tensor, %6 : Tensor = aten::topk(%0, %1, %2, %3, %4)
-            return (%5, %6))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::rand({10, 10, 100}, {at::kCUDA});
-
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
-}
-
-TEST(Converters, ATenMaxDimConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-            %2 : int = prim::Constant[value=0]()
-            %3 : bool = prim::Constant[value=0]()
-            %4 : Tensor, %5 : Tensor = aten::max(%x.1, %2, %3)
-            return (%4, %5))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
-
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
-}
-
-TEST(Converters, ATenMinDimConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-            %2 : int = prim::Constant[value=0]()
-            %3 : bool = prim::Constant[value=0]()
-            %4 : Tensor, %5 : Tensor = aten::min(%x.1, %2, %3)
-            return (%4, %5))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
-
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
-}
-
-TEST(Converters, ATenArgMaxConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-            %2 : int = prim::Constant[value=0]()
-            %3 : bool = prim::Constant[value=0]()
-            %4 : Tensor = aten::argmax(%x.1, %2, %3)
-            return (%4))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
-
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-}
-
-TEST(Converters, ATenArgMaxKeepdimConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-            %2 : int = prim::Constant[value=1]()
-            %3 : bool = prim::Constant[value=1]()
-            %4 : Tensor = aten::argmax(%x.1, %2, %3)
-            return (%4))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
-
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-}
-
-TEST(Converters, ATenArgMinConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-            %2 : int = prim::Constant[value=0]()
-            %3 : bool = prim::Constant[value=0]()
-            %4 : Tensor = aten::argmin(%x.1, %2, %3)
-            return (%4))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
-
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-}
-
-TEST(Converters, ATenArgMinKeepdimConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-            %2 : int = prim::Constant[value=1]()
-            %3 : bool = prim::Constant[value=1]()
-            %4 : Tensor = aten::argmin(%x.1, %2, %3)
-            return (%4))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});
-
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
-
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
-
-  ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
-}
+#include <string>
+#include "core/compiler.h"
+#include "gtest/gtest.h"
+#include "tests/util/util.h"
+#include "torch/csrc/jit/ir/irparser.h"
+
+TEST(Converters, ATenTopKConvertsCorrectly) {
+  const auto graph = R"IR(
+        graph(%0 : Tensor):
+            %1 : int = prim::Constant[value=20]()
+            %2 : int = prim::Constant[value=-1]()
+            %3 : bool = prim::Constant[value=1]()
+            %4 : bool = prim::Constant[value=1]()
+            %5 : Tensor, %6 : Tensor = aten::topk(%0, %1, %2, %3, %4)
+            return (%5, %6))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::rand({10, 10, 100}, {at::kCUDA});
+
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+  ASSERT_TRUE(
+      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
+}
From 153733f9e7cef602e89e0d902271af7626931393 Mon Sep 17 00:00:00 2001
From: Michael Feliz
Date: Thu, 1 Sep 2022 17:43:10 -0700
Subject: [PATCH 3/3] fix permissions on max.cpp

---
 core/conversion/converters/impl/max.cpp | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 mode change 100755 => 100644 core/conversion/converters/impl/max.cpp

diff --git a/core/conversion/converters/impl/max.cpp b/core/conversion/converters/impl/max.cpp
old mode 100755
new mode 100644
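
For anyone exercising the new converters locally, a minimal extra test in the
style of the suites above (hypothetical, not part of the series) covers the
negative-dim path, which min_max_dim/arg_min_max normalize via
dim = selfDim.size() + dim:

#include <string>
#include "core/compiler.h"
#include "gtest/gtest.h"
#include "tests/util/util.h"
#include "torch/csrc/jit/ir/irparser.h"

// Hypothetical follow-up test: argmax over dim=-1 exercises the
// negative-dim normalization inside arg_min_max.
TEST(Converters, ATenArgMaxNegativeDimConvertsCorrectly) {
  const auto graph = R"IR(
      graph(%x.1 : Tensor):
            %2 : int = prim::Constant[value=-1]()
            %3 : bool = prim::Constant[value=0]()
            %4 : Tensor = aten::argmax(%x.1, %2, %3)
            return (%4))IR";

  auto g = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(graph, g.get());

  auto in = at::rand({2, 3, 5, 5}, {at::kCUDA});

  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});

  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});

  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
}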