From 4b2e2f987ce429a3e95064202ec268b0afacb38c Mon Sep 17 00:00:00 2001
From: Anurag Dixit
Date: Mon, 10 Jul 2023 13:36:53 -0700
Subject: [PATCH 1/5] chore: rebase with main branch

Signed-off-by: Anurag Dixit
---
 core/conversion/converters/impl/shuffle.cpp   |  98 +++++++++++++
 .../conversion/converters/test_shuffle.cpp    |  52 +++++++
 tests/cpp/test_dynamic_size.cpp               | 135 +++++++++++++++++-
 3 files changed, 284 insertions(+), 1 deletion(-)

diff --git a/core/conversion/converters/impl/shuffle.cpp b/core/conversion/converters/impl/shuffle.cpp
index f758c0cc47..bc92964a69 100644
--- a/core/conversion/converters/impl/shuffle.cpp
+++ b/core/conversion/converters/impl/shuffle.cpp
@@ -64,6 +64,104 @@ static auto shuffle_registrations TORCHTRT_UNUSED =
                LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
                return true;
              }})
+        .pattern(
+            {"aten::unflatten.int(Tensor self, int dim, int[] sizes) -> (Tensor)",
+            [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+              auto in = args[0].ITensorOrFreeze(ctx);
+              auto dim = args[1].unwrapToInt();
+              auto in_shape = util::toVec(in->getDimensions());
+              std::vector<int64_t> new_shape;
+              nvinfer1::ITensor* shape_tensor;
+              if (ctx->input_is_dynamic) {
+                /*
+                 * In case the dim is negative
+                 * If the dim in negative range is larger than in_shape,
+                 * then it should run into index out of bound error as expected
+                 */
+                if (dim < 0) {
+                  dim = in_shape.size() + dim;
+                }
+                std::cout << "Dynamic shape case" << std::endl;
+                LOG_DEBUG("Using dynamic version of reshape layer");
+                if (args[2].isITensorList()) {
+                  std::cout << "isTensorList case" << std::endl;
+                  LOG_DEBUG("Shape tensor is an ITensorList");
+                  auto expand_shape = args[2].unwrapToITensorList();
+                  auto shape_layer = ctx->net->addShape(*in);
+                  TORCHTRT_CHECK(shape_layer, "Unable to create shape layer from node: " << *n);
+                  auto shape_1d_tensor = shape_layer->getOutput(0);
+
+                  std::vector<int64_t> before_dim_indices_vector(dim);
+                  std::iota(before_dim_indices_vector.begin(), before_dim_indices_vector.end(), 0);
+
+                  nvinfer1::ITensor* before_dim_gather_out = nullptr;
+                  if(before_dim_indices_vector.size()){
+                    at::Tensor before_dim_indices = torch::tensor(before_dim_indices_vector).to(torch::kI32);
+                    auto before_dim_indices_out = converters::tensor_to_const(ctx, before_dim_indices);
+                    auto before_dim_gather_layer = ctx->net->addGather(*shape_1d_tensor, *before_dim_indices_out, 0);
+                    TORCHTRT_CHECK(before_dim_gather_layer, "Unable to create gather layer from node: " << *n);
+                    before_dim_gather_out = before_dim_gather_layer->getOutput(0);
+                  }
+
+                  std::vector<int64_t> after_dim_indices_vector(in_shape.size() - (dim + 1));
+                  std::iota(after_dim_indices_vector.begin(), after_dim_indices_vector.end(), dim + 1);
+
+                  nvinfer1::ITensor* after_dim_gather_out = nullptr;
+                  if(after_dim_indices_vector.size()){
+                    at::Tensor after_dim_indices = torch::tensor(after_dim_indices_vector).to(torch::kI32);
+                    auto after_dim_indices_out = converters::tensor_to_const(ctx, after_dim_indices);
+                    auto after_dim_gather_layer = ctx->net->addGather(*shape_1d_tensor, *after_dim_indices_out, 0);
+                    TORCHTRT_CHECK(after_dim_gather_layer, "Unable to create gather layer from node: " << *n);
+                    after_dim_gather_out = after_dim_gather_layer->getOutput(0);
+                  }
+
+                  std::vector<nvinfer1::ITensor*> shape_tensors;
+                  if(before_dim_gather_out){
+                    shape_tensors.push_back(before_dim_gather_out);
+                  }
+                  for(auto new_shape_tensor : expand_shape){
+                    shape_tensors.push_back(new_shape_tensor);
+                  }
+                  if(after_dim_gather_out){
+                    shape_tensors.push_back(after_dim_gather_out);
+                  }
+
+                  auto shape_cat_layer = ctx->net->addConcatenation(shape_tensors.data(), shape_tensors.size());
+                  TORCHTRT_CHECK(shape_cat_layer, "Unable to create cat layer from node: " << *n);
+                  shape_tensor = shape_cat_layer->getOutput(0);
+                  LOG_DEBUG("Shape tensor shape: " << shape_tensor->getDimensions());
+                } else if (args[2].isIntList()) {
+                  auto shape_vec = args[2].unwrapToIntList().vec();
+                  // New shape
+                  new_shape.insert(new_shape.end(), in_shape.begin(), in_shape.begin() + dim);
+                  new_shape.insert(new_shape.end(), shape_vec.begin(), shape_vec.end());
+                  new_shape.insert(new_shape.end(), in_shape.begin() + dim + 1, in_shape.end());
+
+                  shape_tensor = tensor_to_const(ctx, torch::tensor(new_shape).to(torch::kI32));
+                } else {
+                  LOG_ERROR(
+                      "Invalid IValue type of " << args[2].ivalue_type()
+                                                << " detected for shape tensor from node: " << *n);
+                }
+              }
+              else {
+                new_shape = torch::unflatten(torch::rand(in_shape), dim, args[2].unwrapToIntList().vec()).sizes().vec();
+              }
+              auto shuffle = ctx->net->addShuffle(*in);
+              shuffle->setName(util::node_info(n).c_str());
+              TORCHTRT_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n);
+
+              if (ctx->input_is_dynamic) {
+                shuffle->setInput(1, *shape_tensor);
+              } else {
+                shuffle->setReshapeDimensions(util::toDims(new_shape));
+              }
+
+              auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0));
+              LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+
+              return true;
+            }})
         .pattern(
             {"aten::reshape(Tensor self, int[] shape) -> (Tensor)",
              [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
diff --git a/tests/core/conversion/converters/test_shuffle.cpp b/tests/core/conversion/converters/test_shuffle.cpp
index fad50c9340..9c972ba988 100644
--- a/tests/core/conversion/converters/test_shuffle.cpp
+++ b/tests/core/conversion/converters/test_shuffle.cpp
@@ -364,3 +364,55 @@ TEST(Converters, ATenPixelShuffle5DConvertsCorrectly) {
 
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
 }
+
+TEST(Converters, ATenUnflattenConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=1]()
+        %3 : int = prim::Constant[value=512]()
+        %4 : int = prim::Constant[value=1]()
+        %5 : int = prim::Constant[value=1]()
+        %6 : int[] = prim::ListConstruct(%3, %4, %5)
+        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
+        return (%7))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(0, 5, {1, 512}, {at::kCUDA});
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}
+
+TEST(Converters, ATenUnflattenNegativeDimConvertsCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=-1]()
+        %3 : int = prim::Constant[value=512]()
+        %4 : int = prim::Constant[value=1]()
+        %5 : int = prim::Constant[value=1]()
+        %6 : int[] = prim::ListConstruct(%3, %4, %5)
+        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
+        return (%7))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(0, 5, {1, 512}, {at::kCUDA});
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}
\ No newline at end of file
diff --git a/tests/cpp/test_dynamic_size.cpp b/tests/cpp/test_dynamic_size.cpp
index 9e46842d9c..e8765d6570 100644
--- a/tests/cpp/test_dynamic_size.cpp
+++ b/tests/cpp/test_dynamic_size.cpp
@@ -124,4 +124,137 @@ TEST(Converters, ATenResizeGetItemDynShapeMulCorrectly) {
   auto trt = trt_results[0].reshape(jit_results[0].sizes());
 
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt, 2e-6));
-}
\ No newline at end of file
+}
+
+TEST(Converters, ATenUnflattenDynShapeShapeCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=1]()
+        %3 : int = prim::Constant[value=512]()
+        %4 : int = prim::Constant[value=1]()
+        %5 : int = prim::Constant[value=1]()
+        %6 : int[] = prim::ListConstruct(%3, %4, %5)
+        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
+        return (%7))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(0, 10, {1, 512}, {at::kCUDA});
+
+  auto jit_in = at::clone(in);
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
+
+  auto trt_in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}
+
+TEST(Converters, ATenUnflattenDynShapeNegativeDimsShapeCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=-2]()
+        %3 : int = prim::Constant[value=512]()
+        %4 : int = prim::Constant[value=1]()
+        %5 : int = prim::Constant[value=1]()
+        %6 : int[] = prim::ListConstruct(%3, %4, %5)
+        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
+        return (%7))IR";
+
+  auto g = std::make_shared<torch::jit::Graph>();
+
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(0, 10, {1, 512, 2}, {at::kCUDA});
+
+  auto jit_in = at::clone(in);
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
+
+  auto trt_in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}
+
+TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=1]()
+        %3 : int = aten::size(%x.1, %2)
+        %4 : int = prim::Constant[value=256]()
+        %5 : int = prim::Constant[value=2]()
+        %6 : int[] = prim::ListConstruct(%4, %5)
+        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
+        return (%7))IR";
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(0, 10, {1, 512, 1}, {at::kCUDA});
+
+  auto jit_in = at::clone(in);
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
+
+  auto trt_in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}
+
+TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectlyFirstDim) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %1 : int = prim::Constant[value=0]()
+        %2 : int = prim::Constant[value=1]()
+        %3 : int = aten::size(%x.1, %1)
+        %6 : int[] = prim::ListConstruct(%2, %2, %3, %2, %2)
+        %7 : Tensor = aten::unflatten(%x.1, %1, %6)
+        return (%7))IR";
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(0, 10, {64, 512, 1}, {at::kCUDA});
+
+  auto jit_in = at::clone(in);
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
+
+  auto trt_in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}
+
+TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectlyLastDim) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %1 : int = prim::Constant[value=2]()
+        %2 : int = prim::Constant[value=1]()
+        %3 : int = aten::size(%x.1, %1)
+        %5 : int = prim::Constant[value=2]()
+        %6 : int[] = prim::ListConstruct(%3, %2, %2)
+        %7 : Tensor = aten::unflatten(%x.1, %5, %6)
+        return (%7))IR";
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::randint(0, 10, {1, 512, 9}, {at::kCUDA});
+
+  auto jit_in = at::clone(in);
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
+
+  auto trt_in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
+}

From dad46205b271e6bd2d244c89e1a81f043ca129e0 Mon Sep 17 00:00:00 2001
From: Anurag Dixit
Date: Mon, 10 Jul 2023 13:58:28 -0700
Subject: [PATCH 2/5] chore: trigger lint

Signed-off-by: Anurag Dixit
---
 tests/cpp/test_dynamic_size.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/cpp/test_dynamic_size.cpp b/tests/cpp/test_dynamic_size.cpp
index e8765d6570..c79a4ca6c9 100644
--- a/tests/cpp/test_dynamic_size.cpp
+++ b/tests/cpp/test_dynamic_size.cpp
@@ -257,4 +257,4 @@ TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectlyLastDim) {
   auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
 
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
-}
+}

From b7e8d1ce328317a45df4054de352b2d0182a2514 Mon Sep 17 00:00:00 2001
From: Anurag Dixit
Date: Mon, 10 Jul 2023 17:03:15 -0700
Subject: [PATCH 3/5] chore: apply lint

Signed-off-by: Anurag Dixit
---
 core/conversion/converters/impl/shuffle.cpp | 192 ++++++++++----------
 tests/cpp/test_dynamic_size.cpp             |   2 +-
 2 files changed, 97 insertions(+), 97 deletions(-)

diff --git a/core/conversion/converters/impl/shuffle.cpp b/core/conversion/converters/impl/shuffle.cpp
index bc92964a69..352729c67e 100644
--- a/core/conversion/converters/impl/shuffle.cpp
+++ b/core/conversion/converters/impl/shuffle.cpp
@@ -66,102 +66,102 @@ static auto shuffle_registrations TORCHTRT_UNUSED =
              }})
         .pattern(
             {"aten::unflatten.int(Tensor self, int dim, int[] sizes) -> (Tensor)",
-            [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-              auto in = args[0].ITensorOrFreeze(ctx);
-              auto dim = args[1].unwrapToInt();
-              auto in_shape = util::toVec(in->getDimensions());
-              std::vector<int64_t> new_shape;
-              nvinfer1::ITensor* shape_tensor;
-              if (ctx->input_is_dynamic) {
-                /*
-                 * In case the dim is negative
-                 * If the dim in negative range is larger than in_shape,
-                 * then it should run into index out of bound error as expected
-                 */
-                if (dim < 0) {
-                  dim = in_shape.size() + dim;
-                }
-                std::cout << "Dynamic shape case" << std::endl;
-                LOG_DEBUG("Using dynamic version of reshape layer");
-                if (args[2].isITensorList()) {
-                  std::cout << "isTensorList case" << std::endl;
-                  LOG_DEBUG("Shape tensor is an ITensorList");
-                  auto expand_shape = args[2].unwrapToITensorList();
-                  auto shape_layer = ctx->net->addShape(*in);
-                  TORCHTRT_CHECK(shape_layer, "Unable to create shape layer from node: " << *n);
-                  auto shape_1d_tensor = shape_layer->getOutput(0);
-
-                  std::vector<int64_t> before_dim_indices_vector(dim);
-                  std::iota(before_dim_indices_vector.begin(), before_dim_indices_vector.end(), 0);
-
-                  nvinfer1::ITensor* before_dim_gather_out = nullptr;
-                  if(before_dim_indices_vector.size()){
-                    at::Tensor before_dim_indices = torch::tensor(before_dim_indices_vector).to(torch::kI32);
-                    auto before_dim_indices_out = converters::tensor_to_const(ctx, before_dim_indices);
-                    auto before_dim_gather_layer = ctx->net->addGather(*shape_1d_tensor, *before_dim_indices_out, 0);
-                    TORCHTRT_CHECK(before_dim_gather_layer, "Unable to create gather layer from node: " << *n);
-                    before_dim_gather_out = before_dim_gather_layer->getOutput(0);
-                  }
-
-                  std::vector<int64_t> after_dim_indices_vector(in_shape.size() - (dim + 1));
-                  std::iota(after_dim_indices_vector.begin(), after_dim_indices_vector.end(), dim + 1);
-
-                  nvinfer1::ITensor* after_dim_gather_out = nullptr;
-                  if(after_dim_indices_vector.size()){
-                    at::Tensor after_dim_indices = torch::tensor(after_dim_indices_vector).to(torch::kI32);
-                    auto after_dim_indices_out = converters::tensor_to_const(ctx, after_dim_indices);
-                    auto after_dim_gather_layer = ctx->net->addGather(*shape_1d_tensor, *after_dim_indices_out, 0);
-                    TORCHTRT_CHECK(after_dim_gather_layer, "Unable to create gather layer from node: " << *n);
-                    after_dim_gather_out = after_dim_gather_layer->getOutput(0);
-                  }
-
-                  std::vector<nvinfer1::ITensor*> shape_tensors;
-                  if(before_dim_gather_out){
-                    shape_tensors.push_back(before_dim_gather_out);
-                  }
-                  for(auto new_shape_tensor : expand_shape){
-                    shape_tensors.push_back(new_shape_tensor);
-                  }
-                  if(after_dim_gather_out){
-                    shape_tensors.push_back(after_dim_gather_out);
-                  }
-
-                  auto shape_cat_layer = ctx->net->addConcatenation(shape_tensors.data(), shape_tensors.size());
-                  TORCHTRT_CHECK(shape_cat_layer, "Unable to create cat layer from node: " << *n);
-                  shape_tensor = shape_cat_layer->getOutput(0);
-                  LOG_DEBUG("Shape tensor shape: " << shape_tensor->getDimensions());
-                } else if (args[2].isIntList()) {
-                  auto shape_vec = args[2].unwrapToIntList().vec();
-                  // New shape
-                  new_shape.insert(new_shape.end(), in_shape.begin(), in_shape.begin() + dim);
-                  new_shape.insert(new_shape.end(), shape_vec.begin(), shape_vec.end());
-                  new_shape.insert(new_shape.end(), in_shape.begin() + dim + 1, in_shape.end());
-
-                  shape_tensor = tensor_to_const(ctx, torch::tensor(new_shape).to(torch::kI32));
-                } else {
-                  LOG_ERROR(
-                      "Invalid IValue type of " << args[2].ivalue_type()
-                                                << " detected for shape tensor from node: " << *n);
-                }
-              }
-              else {
-                new_shape = torch::unflatten(torch::rand(in_shape), dim, args[2].unwrapToIntList().vec()).sizes().vec();
-              }
-              auto shuffle = ctx->net->addShuffle(*in);
-              shuffle->setName(util::node_info(n).c_str());
-              TORCHTRT_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n);
-
-              if (ctx->input_is_dynamic) {
-                shuffle->setInput(1, *shape_tensor);
-              } else {
-                shuffle->setReshapeDimensions(util::toDims(new_shape));
-              }
-
-              auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0));
-              LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
-              return true;
-            }})
+             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+               auto in = args[0].ITensorOrFreeze(ctx);
+               auto dim = args[1].unwrapToInt();
+               auto in_shape = util::toVec(in->getDimensions());
+               std::vector<int64_t> new_shape;
+               nvinfer1::ITensor* shape_tensor;
+               if (ctx->input_is_dynamic) {
+                 /*
+                  * In case the dim is negative
+                  * If the dim in negative range is larger than in_shape,
+                  * then it should run into index out of bound error as expected
+                  */
+                 if (dim < 0) {
+                   dim = in_shape.size() + dim;
+                 }
+                 std::cout << "Dynamic shape case" << std::endl;
+                 LOG_DEBUG("Using dynamic version of reshape layer");
+                 if (args[2].isITensorList()) {
+                   std::cout << "isTensorList case" << std::endl;
+                   LOG_DEBUG("Shape tensor is an ITensorList");
+                   auto expand_shape = args[2].unwrapToITensorList();
+                   auto shape_layer = ctx->net->addShape(*in);
+                   TORCHTRT_CHECK(shape_layer, "Unable to create shape layer from node: " << *n);
+                   auto shape_1d_tensor = shape_layer->getOutput(0);
+
+                   std::vector<int64_t> before_dim_indices_vector(dim);
+                   std::iota(before_dim_indices_vector.begin(), before_dim_indices_vector.end(), 0);
+
+                   nvinfer1::ITensor* before_dim_gather_out = nullptr;
+                   if (before_dim_indices_vector.size()) {
+                     at::Tensor before_dim_indices = torch::tensor(before_dim_indices_vector).to(torch::kI32);
+                     auto before_dim_indices_out = converters::tensor_to_const(ctx, before_dim_indices);
+                     auto before_dim_gather_layer = ctx->net->addGather(*shape_1d_tensor, *before_dim_indices_out, 0);
+                     TORCHTRT_CHECK(before_dim_gather_layer, "Unable to create gather layer from node: " << *n);
+                     before_dim_gather_out = before_dim_gather_layer->getOutput(0);
+                   }
+
+                   std::vector<int64_t> after_dim_indices_vector(in_shape.size() - (dim + 1));
+                   std::iota(after_dim_indices_vector.begin(), after_dim_indices_vector.end(), dim + 1);
+
+                   nvinfer1::ITensor* after_dim_gather_out = nullptr;
+                   if (after_dim_indices_vector.size()) {
+                     at::Tensor after_dim_indices = torch::tensor(after_dim_indices_vector).to(torch::kI32);
+                     auto after_dim_indices_out = converters::tensor_to_const(ctx, after_dim_indices);
+                     auto after_dim_gather_layer = ctx->net->addGather(*shape_1d_tensor, *after_dim_indices_out, 0);
+                     TORCHTRT_CHECK(after_dim_gather_layer, "Unable to create gather layer from node: " << *n);
+                     after_dim_gather_out = after_dim_gather_layer->getOutput(0);
+                   }
+
+                   std::vector<nvinfer1::ITensor*> shape_tensors;
+                   if (before_dim_gather_out) {
+                     shape_tensors.push_back(before_dim_gather_out);
+                   }
+                   for (auto new_shape_tensor : expand_shape) {
+                     shape_tensors.push_back(new_shape_tensor);
+                   }
+                   if (after_dim_gather_out) {
+                     shape_tensors.push_back(after_dim_gather_out);
+                   }
+
+                   auto shape_cat_layer = ctx->net->addConcatenation(shape_tensors.data(), shape_tensors.size());
+                   TORCHTRT_CHECK(shape_cat_layer, "Unable to create cat layer from node: " << *n);
+                   shape_tensor = shape_cat_layer->getOutput(0);
+                   LOG_DEBUG("Shape tensor shape: " << shape_tensor->getDimensions());
+                 } else if (args[2].isIntList()) {
+                   auto shape_vec = args[2].unwrapToIntList().vec();
+                   // New shape
+                   new_shape.insert(new_shape.end(), in_shape.begin(), in_shape.begin() + dim);
+                   new_shape.insert(new_shape.end(), shape_vec.begin(), shape_vec.end());
+                   new_shape.insert(new_shape.end(), in_shape.begin() + dim + 1, in_shape.end());
+
+                   shape_tensor = tensor_to_const(ctx, torch::tensor(new_shape).to(torch::kI32));
+                 } else {
+                   LOG_ERROR(
+                       "Invalid IValue type of " << args[2].ivalue_type()
+                                                 << " detected for shape tensor from node: " << *n);
+                 }
+               } else {
+                 new_shape =
+                     torch::unflatten(torch::rand(in_shape), dim, args[2].unwrapToIntList().vec()).sizes().vec();
+               }
+               auto shuffle = ctx->net->addShuffle(*in);
+               shuffle->setName(util::node_info(n).c_str());
+               TORCHTRT_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n);
+
+               if (ctx->input_is_dynamic) {
+                 shuffle->setInput(1, *shape_tensor);
+               } else {
+                 shuffle->setReshapeDimensions(util::toDims(new_shape));
+               }
+
+               auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0));
+               LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+
+               return true;
+             }})
         .pattern(
             {"aten::reshape(Tensor self, int[] shape) -> (Tensor)",
              [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
diff --git a/tests/cpp/test_dynamic_size.cpp b/tests/cpp/test_dynamic_size.cpp
index c79a4ca6c9..e8765d6570 100644
--- a/tests/cpp/test_dynamic_size.cpp
+++ b/tests/cpp/test_dynamic_size.cpp
@@ -257,4 +257,4 @@ TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectlyLastDim) {
   auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
 
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
-}
+}

From 948dc5886b8925cf348c9875d1e73650f3732e81 Mon Sep 17 00:00:00 2001
From: Anurag Dixit
Date: Mon, 10 Jul 2023 17:27:11 -0700
Subject: [PATCH 4/5] chore: Adopting API change from main branch

Signed-off-by: Anurag Dixit
---
 core/conversion/converters/impl/shuffle.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/conversion/converters/impl/shuffle.cpp b/core/conversion/converters/impl/shuffle.cpp
index 352729c67e..5a8e992d90 100644
--- a/core/conversion/converters/impl/shuffle.cpp
+++ b/core/conversion/converters/impl/shuffle.cpp
@@ -140,7 +140,7 @@ static auto shuffle_registrations TORCHTRT_UNUSED =
                   shape_tensor = tensor_to_const(ctx, torch::tensor(new_shape).to(torch::kI32));
                 } else {
                   LOG_ERROR(
-                      "Invalid IValue type of " << args[2].ivalue_type()
+                      "Invalid IValue type of " << args[2].IValue()->type()
                                                 << " detected for shape tensor from node: " << *n);
                 }
               } else {

From a47b5fe0ffe0c783b235bfc212da8a3f1387b307 Mon Sep 17 00:00:00 2001
From: Anurag Dixit
Date: Tue, 11 Jul 2023 15:29:50 -0700
Subject: [PATCH 5/5] chore: Removing redundant test cases

Signed-off-by: Anurag Dixit
---
 tests/cpp/test_dynamic_size.cpp | 56 ---------------------------------
 1 file changed, 56 deletions(-)

diff --git a/tests/cpp/test_dynamic_size.cpp b/tests/cpp/test_dynamic_size.cpp
index e8765d6570..c1edff849d 100644
--- a/tests/cpp/test_dynamic_size.cpp
+++ b/tests/cpp/test_dynamic_size.cpp
@@ -126,62 +126,6 @@ TEST(Converters, ATenResizeGetItemDynShapeMulCorrectly) {
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt, 2e-6));
 }
 
-TEST(Converters, ATenUnflattenDynShapeShapeCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-        %2 : int = prim::Constant[value=1]()
-        %3 : int = prim::Constant[value=512]()
-        %4 : int = prim::Constant[value=1]()
-        %5 : int = prim::Constant[value=1]()
-        %6 : int[] = prim::ListConstruct(%3, %4, %5)
-        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
-        return (%7))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::randint(0, 10, {1, 512}, {at::kCUDA});
-
-  auto jit_in = at::clone(in);
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
-
-  auto trt_in = at::clone(in);
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
-
-  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
-}
-
-TEST(Converters, ATenUnflattenDynShapeNegativeDimsShapeCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-        %2 : int = prim::Constant[value=-2]()
-        %3 : int = prim::Constant[value=512]()
-        %4 : int = prim::Constant[value=1]()
-        %5 : int = prim::Constant[value=1]()
-        %6 : int[] = prim::ListConstruct(%3, %4, %5)
-        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
-        return (%7))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::randint(0, 10, {1, 512, 2}, {at::kCUDA});
-
-  auto jit_in = at::clone(in);
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
-
-  auto trt_in = at::clone(in);
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
-
-  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
-}
-
 TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectly) {
   const auto graph = R"IR(
       graph(%x.1 : Tensor):