Conversation

@fengyuentau (Member) commented Dec 22, 2023

Merge with opencv/opencv#24753

@fengyuentau added the DNN (dnn related tests and data) label Dec 22, 2023
Comment on lines +46 to +142
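The excerpt below comes from the opencv_extra model-generation script, so its file-scope imports are not part of the diff. A minimal sketch of what it assumes (the exact opset alias is an assumption; any opset exposing the ops used below would do):

```python
# Assumed file-scope imports for the excerpt below (hypothetical reconstruction;
# the generator script in opencv_extra defines these once at the top of the file).
import numpy as np
import onnx
import onnxscript as ost               # provides @ost.script() and the FLOAT/INT64/BOOL annotations
from onnxscript import opset13 as op   # assumption: any opset with Constant/Gather/Greater/Add/Div/Erf/Mul

# make_model_and_data(...) is a helper defined elsewhere in the same script;
# a sketch of what it might do follows the first example below.
```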
'''
Builds a model with two Gather ops sharing a single indices constant:
[Input] -> Gather(indices=0) -> Gather(indices=0) -> [Output]
where both Gather nodes reference the same (empty-named) indices constant.
'''
@ost.script()
def gather_shared_indices(x: ost.FLOAT[2, 1, 3, 4]) -> ost.FLOAT[3, 4]:
    indices = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([0], dtype=np.int64)))
    y0 = op.Gather(x, indices, axis=0)
    y1 = op.Gather(y0, indices, axis=0)
    return y1
make_model_and_data(gather_shared_indices, np.random.rand(2, 1, 3, 4).astype(np.float32))
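`make_model_and_data` is defined elsewhere in the generator script. As a rough, hypothetical sketch of what such a helper does (the real one writes TensorProto `.pb` files into opencv_extra's test-data layout and handles the dtype-forcing flags and declared-vs-actual input types more carefully; this sketch covers only the simple path, assuming onnxruntime for reference outputs):

```python
import onnxruntime as ort  # assumption: reference outputs come from onnxruntime

def make_model_and_data(model, *inputs,
                        force_saving_input_as_dtype_float32=False,
                        force_saving_output_as_dtype_float32=False):
    # Hypothetical sketch, not the real opencv_extra helper.
    model_proto = model.to_model_proto()   # onnxscript OnnxFunction -> ModelProto
    name = model_proto.graph.name          # onnxscript names the graph after the function
    onnx.save(model_proto, f"models/{name}.onnx")

    sess = ort.InferenceSession(model_proto.SerializeToString())
    feeds = {inp.name: arr for inp, arr in zip(sess.get_inputs(), inputs)}
    outputs = sess.run(None, feeds)

    # The real helper saves TensorProto .pb files; .npy is used here for brevity.
    for idx, arr in enumerate(inputs):
        if force_saving_input_as_dtype_float32:
            arr = arr.astype(np.float32)
        np.save(f"data/input_{name}_{idx}.npy", arr)
    for idx, arr in enumerate(outputs):
        if force_saving_output_as_dtype_float32:
            arr = arr.astype(np.float32)
        np.save(f"data/output_{name}_{idx}.npy", arr)
```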

'''
[Input] -> Greater(B=61) -> [Output]
   \
    dtype=np.int64
'''
@ost.script()
def greater_input_dtype_int64(x: ost.FLOAT[27, 9]) -> ost.BOOL[27, 9]:
    y = op.Greater(x, op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([61], dtype=np.int64))))
    return y
make_model_and_data(greater_input_dtype_int64, np.random.randint(0, 100, size=[27, 9], dtype=np.int64), force_saving_input_as_dtype_float32=True, force_saving_output_as_dtype_float32=True)

from onnxscript import opset11

@ost.script()
def two_resizes_with_shared_subgraphs(x: ost.FLOAT["batch", 1, "height", "width"], y: ost.FLOAT[1, 1, 3, 2], z: ost.FLOAT[1, 1, 2, 1]) -> ost.FLOAT["batch", 1, "height", "width"]:
    shape_src_1 = opset11.Shape(x)
    shape_src_2 = opset11.Shape(x)
    gather_h = opset11.Gather(shape_src_1, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([2], dtype=np.int64))), axis=0)
    gather_w = opset11.Gather(shape_src_2, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([3], dtype=np.int64))), axis=0)
    unsqueeze_w_1 = opset11.Unsqueeze(gather_w, axes=[0])
    unsqueeze_w_2 = opset11.Unsqueeze(gather_w, axes=[0])
    unsqueeze_h_1 = opset11.Unsqueeze(gather_h, axes=[0])
    unsqueeze_h_2 = opset11.Unsqueeze(gather_h, axes=[0])
    concat_1 = opset11.Cast(opset11.Concat(unsqueeze_h_1, unsqueeze_w_1, axis=0), to=ost.INT64.dtype)
    concat_2 = opset11.Cast(opset11.Concat(unsqueeze_h_2, unsqueeze_w_2, axis=0), to=ost.INT64.dtype)

    # This op is required to test double node removal
    y = opset11.Add(y, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [1], np.array([0.5], dtype=np.float32))))

    # First branch
    sliced = opset11.Slice(opset11.Shape(y),
                           starts=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))),
                           ends=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([2], dtype=np.int64))),
                           axes=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))))
    concat_y = opset11.Concat(sliced, concat_1, axis=0)
    resized_y = opset11.Resize(y,
                               roi=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
                               scales=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
                               sizes=concat_y,
                               coordinate_transformation_mode='pytorch_half_pixel',
                               cubic_coeff_a=-0.75,
                               mode='linear',
                               nearest_mode='floor')

    # Second branch
    sliced = opset11.Slice(opset11.Shape(z),
                           starts=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))),
                           ends=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([2], dtype=np.int64))),
                           axes=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))))
    concat_z = opset11.Concat(sliced, concat_2, axis=0)
    resized_z = opset11.Resize(z,
                               roi=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
                               scales=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
                               sizes=concat_z,
                               coordinate_transformation_mode='pytorch_half_pixel',
                               cubic_coeff_a=-0.75,
                               mode='linear',
                               nearest_mode='floor')

    return opset11.Add(resized_y, resized_z)

make_model_and_data(two_resizes_with_shared_subgraphs, np.random.rand(1, 1, 4, 5).astype(np.float32), np.random.rand(1, 1, 3, 2).astype(np.float32), np.random.rand(1, 1, 2, 1).astype(np.float32))
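Since both Resize branches take their target sizes from the spatial dimensions of `x`, the generated model can be sanity-checked locally before committing the test data. A hedged sketch (assumes onnxruntime is installed and that onnxscript names the graph inputs after the function parameters):

```python
import onnxruntime as ort  # assumption: used only for a local sanity check

x = np.random.rand(1, 1, 4, 5).astype(np.float32)
y = np.random.rand(1, 1, 3, 2).astype(np.float32)
z = np.random.rand(1, 1, 2, 1).astype(np.float32)

sess = ort.InferenceSession(
    two_resizes_with_shared_subgraphs.to_model_proto().SerializeToString())
(out,) = sess.run(None, {"x": x, "y": y, "z": z})

# Both branches resize to (height, width) taken from x, so their sum has x's shape.
assert out.shape == x.shape
```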


@ost.script()
def bias_gelu(x: ost.FLOAT[1, 2, 3]) -> ost.FLOAT[1, 2, 3]:
    bias = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [3], np.array([0.1, 0.3, 0.2], dtype=np.float32)))
    add1 = op.Add(x, bias)
    tmp = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([np.sqrt(2)], dtype=np.float32)))
    div = op.Div(add1, tmp)
    erf = op.Erf(div)
    tmp_0 = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([1], dtype=np.float32)))
    add2 = op.Add(erf, tmp_0)
    mul = op.Mul(add1, add2)
    tmp_1 = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([0.5], dtype=np.float32)))
    return op.Mul(mul, tmp_1)

make_model_and_data(bias_gelu, np.random.rand(1, 2, 3).astype(np.float32))
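The graph above is the exact (erf-based) GELU applied to `x + bias`: with t = x + bias, it computes 0.5 * t * (1 + erf(t / sqrt(2))). A quick numpy cross-check (hedged sketch; assumes scipy is available for `erf`):

```python
from scipy.special import erf  # assumption: scipy is installed locally

def bias_gelu_ref(x, bias):
    # Exact GELU of (x + bias): 0.5 * t * (1 + erf(t / sqrt(2)))
    t = x + bias
    return 0.5 * t * (1.0 + erf(t / np.sqrt(2.0)))

x = np.random.rand(1, 2, 3).astype(np.float32)
bias = np.array([0.1, 0.3, 0.2], dtype=np.float32)
print(bias_gelu_ref(x, bias))  # should match the model's output on the same input
```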
Contributor commented:
@fengyuentau It's not a relevant change.

@fengyuentau (Member, Author) commented Dec 25, 2023 via email

@asmorkalov asmorkalov merged commit 7f0ba7a into opencv:4.x Dec 25, 2023
asmorkalov pushed a commit to opencv/opencv that referenced this pull request Dec 25, 2023
dnn onnx: support constant inputs in einsum importer #24753

Merge with opencv/opencv_extra#1132.

Resolves #24697

Credits to @LaurentBerger.

---

This is a workaround. I suggest getting the input shapes and computing the output shapes in `getMemoryShapes` to keep the best compatibility. Getting shapes during the importer stage is not always robust, and we should avoid it as much as possible.

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There are accuracy tests, performance tests, and test data in the opencv_extra repository, if applicable
      The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
@fengyuentau fengyuentau deleted the einsum_importer branch December 26, 2023 03:32
thewoz pushed a commit to thewoz/opencv that referenced this pull request May 29, 2024
dnn onnx: support constant inputs in einsum importer opencv#24753