-
Notifications
You must be signed in to change notification settings - Fork 1.6k
model and test data to fix einsum importer #1132
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Conversation
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
6 tasks
asmorkalov
requested changes
Dec 25, 2023
Comment on lines
+46
to
+142
| ''' | ||
| It builds a model with two Gather ops sharing a single same indices: | ||
| [Input] -> Gather(indices=0) -> Gather(indices=0) -> [Output] | ||
| , where the two indices constants have the same name. | ||
| ''' | ||
| @ost.script() | ||
| def gather_shared_indices(x: ost.FLOAT[2, 1, 3, 4]) -> ost.FLOAT[3, 4]: | ||
| indices = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([0], dtype=np.int64))) | ||
| y0 = op.Gather(x, indices, axis=0) | ||
| y1 = op.Gather(y0, indices, axis=0) | ||
| return y1 | ||
| make_model_and_data(gather_shared_indices, np.random.rand(2, 1, 3, 4).astype(np.float32)) | ||
|
|
||
| ''' | ||
| [Input] -> Greater(B=61) -> [Output] | ||
| \ | ||
| dtype=np.int64 | ||
| ''' | ||
| @ost.script() | ||
| def greater_input_dtype_int64(x: ost.FLOAT[27, 9]) ->ost.BOOL[27, 9]: | ||
| y = op.Greater(x, op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([61], dtype=np.int64)))) | ||
| return y | ||
| make_model_and_data(greater_input_dtype_int64, np.random.randint(0, 100, size=[27, 9], dtype=np.int64), force_saving_input_as_dtype_float32=True, force_saving_output_as_dtype_float32=True) | ||
|
|
||
| from onnxscript import opset11 | ||
|
|
||
| @ost.script() | ||
| def two_resizes_with_shared_subgraphs(x: ost.FLOAT["batch", 1, "height", "width"], y: ost.FLOAT[1, 1, 3, 2], z: ost.FLOAT[1, 1, 2, 1]) ->ost.FLOAT["batch", 1, "height", "width"]: | ||
| shape_src_1 = opset11.Shape(x) | ||
| shape_src_2 = opset11.Shape(x) | ||
| gather_h = opset11.Gather(shape_src_1, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([2], dtype=np.int64))), axis=0) | ||
| gather_w = opset11.Gather(shape_src_2, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([3], dtype=np.int64))), axis=0) | ||
| unsqueeze_w_1 = opset11.Unsqueeze(gather_w, axes=[0]) | ||
| unsqueeze_w_2 = opset11.Unsqueeze(gather_w, axes=[0]) | ||
| unsqueeze_h_1 = opset11.Unsqueeze(gather_h, axes=[0]) | ||
| unsqueeze_h_2 = opset11.Unsqueeze(gather_h, axes=[0]) | ||
| concat_1 = opset11.Cast(opset11.Concat(unsqueeze_h_1, unsqueeze_w_1, axis=0), to=ost.INT64.dtype) | ||
| concat_2 = opset11.Cast(opset11.Concat(unsqueeze_h_2, unsqueeze_w_2, axis=0), to=ost.INT64.dtype) | ||
|
|
||
| # This op is required to test double node removal | ||
| y = opset11.Add(y, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [1], np.array([0.5], dtype=np.float32)))) | ||
|
|
||
| # First branch | ||
| sliced = opset11.Slice(opset11.Shape(y), | ||
| starts=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))), | ||
| ends=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([2], dtype=np.int64))), | ||
| axes=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))), | ||
| ) | ||
| concat_y = opset11.Concat(sliced, concat_1, axis=0) | ||
| resized_y = opset11.Resize(y, | ||
| roi=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))), | ||
| scales=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))), | ||
| sizes=concat_y, | ||
| coordinate_transformation_mode='pytorch_half_pixel', | ||
| cubic_coeff_a=-0.75, | ||
| mode='linear', | ||
| nearest_mode='floor' | ||
| ) | ||
|
|
||
| # Second branch | ||
| sliced = opset11.Slice(opset11.Shape(z), | ||
| starts=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))), | ||
| ends=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([2], dtype=np.int64))), | ||
| axes=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))), | ||
| ) | ||
| concat_z = opset11.Concat(sliced, concat_2, axis=0) | ||
| resized_z = opset11.Resize(z, | ||
| roi=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))), | ||
| scales=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))), | ||
| sizes=concat_z, | ||
| coordinate_transformation_mode='pytorch_half_pixel', | ||
| cubic_coeff_a=-0.75, | ||
| mode='linear', | ||
| nearest_mode='floor' | ||
| ) | ||
|
|
||
| return opset11.Add(resized_y, resized_z) | ||
|
|
||
| make_model_and_data(two_resizes_with_shared_subgraphs, np.random.rand(1, 1, 4, 5).astype(np.float32), np.random.rand(1, 1, 3, 2).astype(np.float32), np.random.rand(1, 1, 2, 1).astype(np.float32)) | ||
|
|
||
|
|
||
| @ost.script() | ||
| def bias_gelu(x: ost.FLOAT[1, 2, 3]) -> ost.FLOAT[1, 2, 3]: | ||
| bias = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [3], np.array([0.1, 0.3, 0.2], dtype=np.float32))) | ||
| add1 = op.Add(x, bias) | ||
| tmp = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([np.sqrt(2)], dtype=np.float32))) | ||
| div = op.Div(add1, tmp) | ||
| erf = op.Erf(div) | ||
| tmp_0 = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([1], dtype=np.float32))) | ||
| add2 = op.Add(erf, tmp_0) | ||
| mul = op.Mul(add1, add2) | ||
| tmp_1 = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([0.5], dtype=np.float32))) | ||
| return op.Mul(mul, tmp_1) | ||
|
|
||
| make_model_and_data(bias_gelu, np.random.rand(1, 2, 3).astype(np.float32)) |
Contributor
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@fengyuentau It's not relevant change.
Member
Author
|
They are mis-commented in the Attention PR.
________________________________
发件人: Alexander Smorkalov ***@***.***>
发送时间: Monday, December 25, 2023 6:55:45 PM
收件人: opencv/opencv_extra ***@***.***>
抄送: Yuantao Feng ***@***.***>; Mention ***@***.***>
主题: Re: [opencv/opencv_extra] model and test data to fix einsum importer (PR #1132)
@asmorkalov requested changes on this pull request.
________________________________
In testdata/dnn/onnx/generate_onnx_models_with_onnxscript.py<#1132 (comment)>:
+'''
+ It builds a model with two Gather ops sharing a single same indices:
+
+ [Input] -> Gather(indices=0) -> Gather(indices=0) -> [Output]
+
+ , where the two indices constants have the same name.
+'''
***@***.***()
+def gather_shared_indices(x: ost.FLOAT[2, 1, 3, 4]) -> ost.FLOAT[3, 4]:
+ indices = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([0], dtype=np.int64)))
+ y0 = op.Gather(x, indices, axis=0)
+ y1 = op.Gather(y0, indices, axis=0)
+ return y1
+make_model_and_data(gather_shared_indices, np.random.rand(2, 1, 3, 4).astype(np.float32))
+
+'''
+ [Input] -> Greater(B=61) -> [Output]
+ \
+ dtype=np.int64
+'''
***@***.***()
+def greater_input_dtype_int64(x: ost.FLOAT[27, 9]) ->ost.BOOL[27, 9]:
+ y = op.Greater(x, op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([61], dtype=np.int64))))
+ return y
+make_model_and_data(greater_input_dtype_int64, np.random.randint(0, 100, size=[27, 9], dtype=np.int64), force_saving_input_as_dtype_float32=True, force_saving_output_as_dtype_float32=True)
+
+from onnxscript import opset11
+
***@***.***()
+def two_resizes_with_shared_subgraphs(x: ost.FLOAT["batch", 1, "height", "width"], y: ost.FLOAT[1, 1, 3, 2], z: ost.FLOAT[1, 1, 2, 1]) ->ost.FLOAT["batch", 1, "height", "width"]:
+ shape_src_1 = opset11.Shape(x)
+ shape_src_2 = opset11.Shape(x)
+ gather_h = opset11.Gather(shape_src_1, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([2], dtype=np.int64))), axis=0)
+ gather_w = opset11.Gather(shape_src_2, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [], np.array([3], dtype=np.int64))), axis=0)
+ unsqueeze_w_1 = opset11.Unsqueeze(gather_w, axes=[0])
+ unsqueeze_w_2 = opset11.Unsqueeze(gather_w, axes=[0])
+ unsqueeze_h_1 = opset11.Unsqueeze(gather_h, axes=[0])
+ unsqueeze_h_2 = opset11.Unsqueeze(gather_h, axes=[0])
+ concat_1 = opset11.Cast(opset11.Concat(unsqueeze_h_1, unsqueeze_w_1, axis=0), to=ost.INT64.dtype)
+ concat_2 = opset11.Cast(opset11.Concat(unsqueeze_h_2, unsqueeze_w_2, axis=0), to=ost.INT64.dtype)
+
+ # This op is required to test double node removal
+ y = opset11.Add(y, opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [1], np.array([0.5], dtype=np.float32))))
+
+ # First branch
+ sliced = opset11.Slice(opset11.Shape(y),
+ starts=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))),
+ ends=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([2], dtype=np.int64))),
+ axes=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))),
+ )
+ concat_y = opset11.Concat(sliced, concat_1, axis=0)
+ resized_y = opset11.Resize(y,
+ roi=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
+ scales=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
+ sizes=concat_y,
+ coordinate_transformation_mode='pytorch_half_pixel',
+ cubic_coeff_a=-0.75,
+ mode='linear',
+ nearest_mode='floor'
+ )
+
+ # Second branch
+ sliced = opset11.Slice(opset11.Shape(z),
+ starts=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))),
+ ends=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([2], dtype=np.int64))),
+ axes=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.INT64, [1], np.array([0], dtype=np.int64))),
+ )
+ concat_z = opset11.Concat(sliced, concat_2, axis=0)
+ resized_z = opset11.Resize(z,
+ roi=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
+ scales=opset11.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [0], np.empty([0]))),
+ sizes=concat_z,
+ coordinate_transformation_mode='pytorch_half_pixel',
+ cubic_coeff_a=-0.75,
+ mode='linear',
+ nearest_mode='floor'
+ )
+
+ return opset11.Add(resized_y, resized_z)
+
+make_model_and_data(two_resizes_with_shared_subgraphs, np.random.rand(1, 1, 4, 5).astype(np.float32), np.random.rand(1, 1, 3, 2).astype(np.float32), np.random.rand(1, 1, 2, 1).astype(np.float32))
+
+
***@***.***()
+def bias_gelu(x: ost.FLOAT[1, 2, 3]) -> ost.FLOAT[1, 2, 3]:
+ bias = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [3], np.array([0.1, 0.3, 0.2], dtype=np.float32)))
+ add1 = op.Add(x, bias)
+ tmp = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([np.sqrt(2)], dtype=np.float32)))
+ div = op.Div(add1, tmp)
+ erf = op.Erf(div)
+ tmp_0 = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([1], dtype=np.float32)))
+ add2 = op.Add(erf, tmp_0)
+ mul = op.Mul(add1, add2)
+ tmp_1 = op.Constant(value=onnx.helper.make_tensor("", onnx.TensorProto.FLOAT, [], np.array([0.5], dtype=np.float32)))
+ return op.Mul(mul, tmp_1)
+
+make_model_and_data(bias_gelu, np.random.rand(1, 2, 3).astype(np.float32))
@fengyuentau<https://github.com/fengyuentau> It's not relevant change.
―
Reply to this email directly, view it on GitHub<#1132 (review)>, or unsubscribe<https://github.com/notifications/unsubscribe-auth/AEDL63RALKZN6CU5UR4K2VLYLFLTDAVCNFSM6AAAAABA77JS3SVHI2DSMVQWIX3LMV43YUDVNRWFEZLROVSXG5CSMV3GSZLXHMYTOOJVHA2DQOJWGA>.
You are receiving this because you were mentioned.Message ID: ***@***.***>
|
asmorkalov
pushed a commit
to opencv/opencv
that referenced
this pull request
Dec 25, 2023
dnn onnx: support constant inputs in einsum importer #24753 Merge with opencv/opencv_extra#1132. Resolves #24697 Credits to @LaurentBerger. --- This is a workaround. I suggest to get input shapes and calculate the output shapes in `getMemoryShapes` so as to keep the best compatibility. It is not always robust getting shapes during the importer stage and we should avoid that as much as possible. ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [x] There is a reference to the original bug report and related work - [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. - [x] The feature is well documented and sample code can be built with the project CMake
thewoz
pushed a commit
to thewoz/opencv
that referenced
this pull request
May 29, 2024
dnn onnx: support constant inputs in einsum importer opencv#24753 Merge with opencv/opencv_extra#1132. Resolves opencv#24697 Credits to @LaurentBerger. --- This is a workaround. I suggest to get input shapes and calculate the output shapes in `getMemoryShapes` so as to keep the best compatibility. It is not always robust getting shapes during the importer stage and we should avoid that as much as possible. ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [x] There is a reference to the original bug report and related work - [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. - [x] The feature is well documented and sample code can be built with the project CMake
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Merge with opencv/opencv#24753