diff --git a/testdata/dnn/onnx/data/input_div_test_1x1_0.npy b/testdata/dnn/onnx/data/input_div_test_1x1_0.npy
new file mode 100644
index 000000000..487769bcf
Binary files /dev/null and b/testdata/dnn/onnx/data/input_div_test_1x1_0.npy differ
diff --git a/testdata/dnn/onnx/data/input_div_test_1x1_1.npy b/testdata/dnn/onnx/data/input_div_test_1x1_1.npy
new file mode 100644
index 000000000..e3ffd0e06
Binary files /dev/null and b/testdata/dnn/onnx/data/input_div_test_1x1_1.npy differ
diff --git a/testdata/dnn/onnx/data/output_div_test_1x1.npy b/testdata/dnn/onnx/data/output_div_test_1x1.npy
new file mode 100644
index 000000000..0192e0d45
Binary files /dev/null and b/testdata/dnn/onnx/data/output_div_test_1x1.npy differ
diff --git a/testdata/dnn/onnx/generate_onnx_models.py b/testdata/dnn/onnx/generate_onnx_models.py
index 7e9090299..8fa233464 100644
--- a/testdata/dnn/onnx/generate_onnx_models.py
+++ b/testdata/dnn/onnx/generate_onnx_models.py
@@ -92,6 +92,23 @@ def save_data_and_onnx_model(name, input_np, output_np, onnx_model):
     with open(models_files, 'wb') as file:
         file.write(model_def.SerializeToString())
 
+def save_data_and_onnx_model_multy_inputs(name, input_list, output_np, onnx_model):
+    for index in range(len(input_list)):
+        print(name + " input " + str(index) + " has sizes", input_list[index].shape)
+        input_files = os.path.join("data", "input_" + name + "_" + str(index))
+        np.save(input_files, input_list[index])
+
+    print(name + " output has sizes", output_np.shape)
+    print()
+    output_files = os.path.join("data", "output_" + name)
+    np.save(output_files, np.ascontiguousarray(output_np.data))
+
+    models_files = os.path.join("models", name + ".onnx")
+
+    onnx_model_pb = onnx._serialize(onnx_model)
+    model_def = assertONNXExpected(onnx_model_pb)
+    with open(models_files, 'wb') as file:
+        file.write(model_def.SerializeToString())
 
 def simplify(name, rename=False, **kwargs):
     model, check = onnxsim.simplify(name, **kwargs)
@@ -2091,3 +2108,23 @@ def gemm_reference_implementation(A: np.ndarray, B: np.ndarray, C: Optional[np.n
 
 output_np = np.sum(input_np, axis=1, keepdims=1)
 save_data_and_onnx_model("reduce_sum_axis_dynamic_batch", input_np, output_np, onnx_model)
+
+
+# ########################## DivBroadcast ##########################
+input_np = np.random.rand(1, 4).astype("float32")
+input2_np = np.random.rand(1, 1).astype(np.float32)
+inputs = [onnx.helper.make_tensor_value_info("input1", onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[input_np.dtype], shape=input_np.shape), \
+          onnx.helper.make_tensor_value_info("input2", onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[input2_np.dtype], shape=input2_np.shape)]
+
+outputs = [onnx.helper.make_tensor_value_info("output", onnx.TensorProto.FLOAT, shape=(1, 4))]
+
+nodes = [onnx.helper.make_node("Div", ["input1", "input2"], ["output"])]
+
+graph = onnx.helper.make_graph(nodes,
+                               "div_test",
+                               inputs,
+                               outputs)
+onnx_model = onnx.helper.make_model(graph)
+
+output_np = input_np / input2_np
+save_data_and_onnx_model_multy_inputs("div_test_1x1", [input_np, input2_np], output_np, onnx_model)
diff --git a/testdata/dnn/onnx/models/div_test_1x1.onnx b/testdata/dnn/onnx/models/div_test_1x1.onnx
new file mode 100644
index 000000000..52eee842e
--- /dev/null
+++ b/testdata/dnn/onnx/models/div_test_1x1.onnx
@@ -0,0 +1,16 @@
+:w
+
+input1
+input2output"Divdiv_testZ
+input1
+  
+
+Z
+input2
+  
+
+b
+output
+  
+
+B
\ No newline at end of file
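
For reference, a minimal sanity-check sketch, not part of the patch and assuming onnxruntime is installed: it loads the generated div_test_1x1 model together with the .npy tensors written by save_data_and_onnx_model_multy_inputs, and verifies the broadcast division against the stored reference output. The file paths and the input names "input1"/"input2" follow the diff above.

import numpy as np
import onnxruntime as ort

# Tensors written by the generator script (run from testdata/dnn/onnx).
input1 = np.load("data/input_div_test_1x1_0.npy")       # shape (1, 4)
input2 = np.load("data/input_div_test_1x1_1.npy")       # shape (1, 1)
reference = np.load("data/output_div_test_1x1.npy")     # expected input1 / input2

# Run the exported model; Div broadcasts the (1, 1) divisor over the (1, 4) dividend.
session = ort.InferenceSession("models/div_test_1x1.onnx")
(result,) = session.run(None, {"input1": input1, "input2": input2})

np.testing.assert_allclose(result, reference, rtol=1e-5, atol=1e-6)
print("div_test_1x1 broadcast check passed")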